Compare commits
935 commits · RELEASE.20... → RELEASE.20...
*(Commit table: 935 rows listing bare commit SHAs, from 2f4af09c01 through ad7417bc50; the Author, Date, and message columns are empty in this capture.)*
@@ -1,2 +1,9 @@
.git
.github
docs
default.etcd
browser
*.gz
*.tar.gz
*.bzip2
*.zip
.github/PULL_REQUEST_TEMPLATE.md (vendored, 5 changes)
@@ -10,9 +10,10 @@
## Types of changes
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Optimization (provides speedup with no functional changes)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)

## Checklist:
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Documentation updated
- [ ] Unit tests added/updated
.github/stale.yml (vendored, 1 change)
@@ -14,6 +14,7 @@ onlyLabels: []
exemptLabels:
  - "security"
  - "pending discussion"
  - "do not close"

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
.github/workflows/codeql.yml (vendored, 51 changes; file removed)
@@ -1,51 +0,0 @@
name: "Code scanning - action"

on:
  push:
  pull_request:
  schedule:
    - cron: '0 19 * * 0'

jobs:
  CodeQL-Build:

    # CodeQL runs on ubuntu-latest and windows-latest
    runs-on: ubuntu-latest

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2
      with:
        # We must fetch at least the immediate parents so that if this is
        # a pull request then we can checkout the head.
        fetch-depth: 2

    # If this run was triggered by a pull request event, then checkout
    # the head of the pull request instead of the merge commit.
    - run: git checkout HEAD^2
      if: ${{ github.event_name == 'pull_request' }}

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: go, javascript

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    # and modify them (or add more) to build your code if your project
    # uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
.github/workflows/go.yml (vendored, 10 changes)
@@ -4,7 +4,6 @@ on:
  pull_request:
    branches:
      - master
      - release

jobs:
  build:
@@ -12,7 +11,7 @@ jobs:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.13.x]
        go-version: [1.15.x, 1.16.x]
        os: [ubuntu-latest, windows-latest]
    steps:
    - uses: actions/checkout@v2
@@ -27,7 +26,6 @@ jobs:
      env:
        CGO_ENABLED: 0
        GO111MODULE: on
        MINIO_CI_CD: 1
      run: |
        go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
        go test -v --timeout 50m ./...
@@ -36,9 +34,13 @@ jobs:
      env:
        CGO_ENABLED: 0
        GO111MODULE: on
        MINIO_CI_CD: 1
      run: |
        sudo sysctl net.ipv6.conf.all.disable_ipv6=0
        sudo sysctl net.ipv6.conf.default.disable_ipv6=0
        sudo apt-get install devscripts shellcheck
        nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
        curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
        go list -m all | ./nancy sleuth
        make
        diff -au <(gofmt -s -d cmd) <(printf "")
        diff -au <(gofmt -s -d pkg) <(printf "")
@@ -23,5 +23,11 @@ issues:
  exclude:
    - should have a package comment
    - error strings should not be capitalized or end with punctuation or a newline

run:
  skip-dirs:
    - pkg/rpc
    - pkg/argon2

service:
  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
@@ -114,11 +114,11 @@ checksum:

signs:
  -
    signature: "${artifact}.asc"
    signature: "${artifact}.minisig"
    cmd: "sh"
    args:
      - '-c'
      - 'gpg --quiet --detach-sign -a ${artifact}'
      - 'minisign -s /media/${USER}/minio/minisign.key -qQSm ${artifact} < /media/${USER}/minio/minisign-passphrase'
    artifacts: all

changelog:
.nancy-ignore (new, empty file)
Dockerfile (17 changes)
@@ -1,4 +1,4 @@
FROM golang:1.13-alpine as builder
FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

@@ -11,22 +11,27 @@ RUN \
    git clone https://github.com/minio/minio && cd minio && \
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_ROOT_USER_FILE=access_key \
    MINIO_ROOT_PASSWORD_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /third_party/
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
@@ -1,29 +0,0 @@ (file removed)
FROM multiarch/qemu-user-static:x86_64-arm as qemu
FROM arm32v7/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-arm-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
@@ -1,29 +0,0 @@ (file removed)
FROM multiarch/qemu-user-static:x86_64-aarch64 as qemu
FROM arm64v8/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-aarch64-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm64/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
Dockerfile.cicd (new file, 42 lines)
@@ -0,0 +1,42 @@
FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_ROOT_USER_FILE=access_key \
    MINIO_ROOT_PASSWORD_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio", "server", "/data"]
@@ -1,22 +1,25 @@
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
ENV MINIO_UPDATE=off \
    MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_ROOT_USER_FILE=access_key \
    MINIO_ROOT_PASSWORD_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
RUN microdnf update --nodocs
RUN microdnf install curl ca-certificates shadow-utils util-linux --nodocs
RUN microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000
@@ -1,4 +1,4 @@
FROM ubuntu
FROM ubuntu:20.04

LABEL maintainer="MinIO Inc <dev@min.io>"
@@ -1,29 +0,0 @@ (file removed)
FROM multiarch/qemu-user-static:x86_64-ppc64le as qemu
FROM ppc64le/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-ppc64le-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-ppc64le/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
@@ -1,21 +1,41 @@
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

LABEL maintainer="MinIO Inc <dev@min.io>"
ARG TARGETARCH

LABEL name="MinIO" \
    vendor="MinIO Inc <dev@min.io>" \
    maintainer="MinIO Inc <dev@min.io>" \
    version="RELEASE.2021-02-24T18-44-45Z" \
    release="RELEASE.2021-02-24T18-44-45Z" \
    summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
    description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_ROOT_USER_FILE=access_key \
    MINIO_ROOT_PASSWORD_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-amd64/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
    microdnf install minisign --nodocs && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
    microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS
    chmod +x /usr/bin/verify-minio.sh && \
    /usr/bin/verify-minio.sh

EXPOSE 9000
@@ -1,29 +0,0 @@ (file removed)
FROM multiarch/qemu-user-static:x86_64-s390x as qemu
FROM s390x/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-s390x-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-s390x/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
Makefile (30 changes)
@@ -17,11 +17,18 @@ checks:
getdeps:
	@mkdir -p ${GOPATH}/bin
	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && go get github.com/quasilyte/go-ruleguard/cmd/ruleguard@v0.2.1)
	@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
	@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps fmt lint
verifiers: getdeps fmt lint ruleguard check-gen

check-gen:
	@go generate ./... >/dev/null
	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

fmt:
	@echo "Running $@ check"
@@ -31,7 +38,11 @@ fmt:
lint:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml

ruleguard:
	@echo "Running $@ check"
	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...

# Builds minio, runs the verifiers then runs the tests.
check: test
@@ -46,7 +57,7 @@ test-race: verifiers build
# Verify minio binary
verify:
	@echo "Verifying build with race"
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-build.sh)

# Verify healing of disks with minio binary
@@ -60,9 +71,18 @@ build: checks
	@echo "Building minio binary to './minio'"
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

docker: checks
hotfix-vars:
	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
	sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
	$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
hotfix: hotfix-vars install

docker-hotfix: hotfix checks
	@echo "Building minio docker image '$(TAG)'"
	@docker build -t $(TAG) . -f Dockerfile.dev

docker: build checks
	@echo "Building minio docker image '$(TAG)'"
	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@docker build -t $(TAG) . -f Dockerfile.dev

# Builds minio and installs it to $GOPATH/bin.
README.md (228 changes)
@@ -5,80 +5,175 @@

MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.

## Docker Container
### Stable
```
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio server /data
```
This README provides quickstart instructions on running MinIO on bare-metal hardware, including Docker-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

# Docker Installation

Use the following commands to run a standalone MinIO server on a Docker container.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in a distributed configuration with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
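For illustration, a minimal erasure-coded layout on a single host could pass four drives to one server (a sketch, not from the diff above; the `/data{1...4}` paths are placeholders for four real drives):

```sh
# MinIO expands {1...4} to /data1 .. /data4, enabling erasure coding
# across the four drives; substitute real mount points in practice.
minio server /data{1...4}
```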
## Stable

Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:

```sh
docker run -p 9000:9000 minio/minio server /data
```

### Edge
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the `docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
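For example, a persistent variant of the command above could look like this (a sketch assembled from the note's own pieces; `/mnt/data` is an illustrative host path):

```sh
# Persist objects on the host by mapping /mnt/data into the container
docker run -p 9000:9000 \
  -v /mnt/data:/data \
  minio/minio server /data
```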
## Edge

Run the following command to run the bleeding-edge image of MinIO on a Docker container using an ephemeral data volume:

```
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:edge server /data
docker run -p 9000:9000 minio/minio:edge server /data
```

> NOTE: Docker will not display the default keys unless you start the container with the `-it` (interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the `docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.

# macOS

Use the following commands to run a standalone MinIO server on macOS.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in a distributed configuration with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

## Homebrew (recommended)

Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

## macOS
### Homebrew (recommended)
Install minio packages using [Homebrew](https://brew.sh/)
```sh
brew install minio/stable/minio
minio server /data
```

> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from the `minio/stable/minio` official repo instead.

```sh
brew uninstall minio
brew install minio/stable/minio
```

### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| Apple macOS | 64-bit Intel | https://dl.min.io/server/minio/release/darwin-amd64/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

## Binary Download

Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

```sh
chmod 755 minio
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
./minio server /data
```

## GNU/Linux
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

# GNU/Linux

Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```

| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.

| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | https://dl.min.io/server/minio/release/linux-amd64/minio |
| 64-bit ARM | https://dl.min.io/server/minio/release/linux-arm64/minio |
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in a distributed configuration with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
# Microsoft Windows

To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:

```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```

## Microsoft Windows
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| Microsoft Windows | 64-bit | https://dl.min.io/server/minio/release/windows-amd64/minio.exe |
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:

```sh
minio.exe server D:\Photos
minio.exe server D:\
```

## FreeBSD
### Port
Install minio packages using [pkg](https://github.com/freebsd/pkg); MinIO doesn't officially build FreeBSD binaries but is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in a distributed configuration with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

# FreeBSD

MinIO does not provide an official FreeBSD binary. However, FreeBSD maintains an [upstream release](https://www.freshports.org/www/minio) using [pkg](https://github.com/freebsd/pkg):

```sh
pkg install minio
```

@@ -87,13 +182,32 @@
```sh
sysrc minio_disks=/home/user/Photos
service minio start
```
## Install from Source
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
# Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in a distributed configuration with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

# Deployment Recommendations

## Allow port access for Firewalls

By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.

@@ -149,20 +263,22 @@
```
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
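On distributions that manage the firewall with `firewalld` rather than raw iptables, an equivalent rule could look like the following (a sketch, assuming the default `public` zone):

```sh
# Permanently open MinIO's default port, then apply the change
firewall-cmd --zone=public --add-port=9000/tcp --permanent
firewall-cmd --reload
```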
|
||||
## Pre-existing data

When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.

The above statement is also valid for all gateway backends.
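A minimal sketch of this behavior, assuming the default root credentials and an `mc` alias named `local` (both arbitrary choices for illustration): a directory under the data path shows up as a bucket, and its files as objects.

```sh
mkdir -p /mnt/data/mybucket
echo "hello" > /mnt/data/mybucket/greeting.txt
minio server /mnt/data &                       # serve the pre-existing directory
mc alias set local http://127.0.0.1:9000 minioadmin minioadmin
mc ls local/mybucket                           # greeting.txt is listed as an object
```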
# Test MinIO Connectivity

## Test using MinIO Browser

MinIO Server comes with an embedded web-based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.

![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)

## Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
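For example, a first `mc` session against the server above might look like this (bucket and file names are arbitrary):

```sh
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
mc mb myminio/mybucket              # create a bucket
mc cp photo.jpg myminio/mybucket    # upload an object
mc ls myminio/mybucket              # list the bucket's contents
```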
# Upgrading MinIO

MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):

```
mc admin update <minio alias, e.g., myminio>
```

> NOTE: some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read the release notes before upgrading. In such a situation `mc admin update` is the recommended mechanism to upgrade all servers at once.
## Important things to remember during MinIO upgrades

- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located. For example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin` (see the sketch after this list).
- `mc admin update` updates and restarts all servers simultaneously; applications will retry and continue their respective operations upon upgrade.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms for rolling out updates.
- In the case of federated setups, `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
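A minimal pre-flight sketch for the first point above, assuming the binary lives at `/usr/local/bin/minio` and a cluster alias `myminio`:

```sh
ls -ld /usr/local/bin      # confirm the user running MinIO can write here
mc admin update myminio    # then update and restart all servers at once
```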
# Explore Further

- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)

# Contribute to MinIO Project

Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md).

# License

Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).
118 README_zh_CN.md
## Docker Container

### Stable

```
docker pull minio/minio
docker run -p 9000:9000 \
  -e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio server /data
```

### Edge

```
docker pull minio/minio:edge
docker run -p 9000:9000 \
  -e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:edge server /data
```

> Tip: Docker will not display the default keys unless you start the container with the `-it` (interactive TTY) flag. Using a container's default keys is generally not recommended. For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide).
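When running detached instead of with `-it`, the startup credentials can still be read back from the container logs; a quick sketch (the container name `minio1` is an arbitrary choice):

```sh
docker run -d -p 9000:9000 --name minio1 minio/minio server /data
docker logs minio1    # startup output includes the root credentials
```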
## macOS

### Homebrew (recommended)

Install minio using [Homebrew](http://brew.sh/):

```sh
brew install minio/stable/minio
minio server /data
```

> Tip: If you previously installed minio via `brew install minio`, reinstall it from the official `minio/stable/minio` tap; due to a golang 1.8 bug, the homebrew build is unstable:

```sh
brew uninstall minio
brew install minio/stable/minio
```
| Operating System | CPU Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |

```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```

| Operating System | CPU Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |

```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
## FreeBSD

### Port

Install using [pkg](https://github.com/freebsd/pkg). MinIO does not provide official FreeBSD binaries; the port is maintained upstream by FreeBSD, see [here](https://www.freshports.org/www/minio):

```sh
pkg install minio
sysrc minio_enable=yes
sysrc minio_disks=/home/user/Photos
service minio start
```

## Install from Source

Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.15](https://golang.org/dl/#stable).

```sh
GO111MODULE=on go get github.com/minio/minio
```
## Allow port access for Firewalls

By default MinIO uses port 9000 to listen for incoming connections. If your platform blocks this port by default, you need to enable access to it.

### ufw

For hosts with ufw enabled (Debian-based distributions), you can use the `ufw` command to allow all traffic on a given port. Allow access to port 9000 with:

```sh
ufw allow 9000
```

The following command allows all incoming traffic on ports 9000-9010:

```sh
ufw allow 9000:9010/tcp
```

### firewall-cmd

For hosts with firewall-cmd enabled (CentOS), you can use the `firewall-cmd` command to allow all traffic on a given port. To allow access to port 9000, first run:

```sh
firewall-cmd --get-active-zones
```

This command returns the zones currently in use. Now apply the port rule to the zone returned above. For example, if the zone is `public`, use:

```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```

The `permanent` flag persists the rule across firewall start, restart and reload. Finally, reload the firewall for the change to take effect:

```sh
firewall-cmd --reload
```

### iptables

For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to allow all traffic on a given port. Allow access to port 9000 with:

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```

The following command allows all incoming traffic on ports 9000-9010:

```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
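To verify the firewalld change took effect, a quick check (standard `firewall-cmd` usage, not MinIO-specific) is:

```sh
firewall-cmd --zone=public --list-ports    # should include 9000/tcp
```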
## Test using MinIO Browser

MinIO Server comes with an embedded web-based object browser. After installation, point your browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if the page loads, minio has been installed successfully.

![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)
The above statement is also valid for all gateway backends.

## Upgrading MinIO

MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster, which allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all users upgrade from the client with [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update). This updates all nodes in the cluster simultaneously and restarts them, as shown in the following command:

```
mc admin update <minio alias, e.g., myminio>
```

> Note: some releases may not allow rolling upgrades; this is usually mentioned in the release notes, so it is recommended to read them before upgrading. In such cases, the `mc admin update` mechanism is recommended to upgrade all servers at once.

### Important things to remember during MinIO upgrades

- `mc admin update` only works if the user running MinIO has write permission on the parent directory of the binary; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers at the same time; applications will retry and continue their respective operations after the upgrade.
- `mc admin update` cannot be used in kubernetes/container environments; container environments provide their own update mechanisms.
- For federated deployments, run `mc admin update` against each cluster separately. Do not update `mc` to any new release until all clusters have been updated successfully.
- If `kes` is used as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If Vault is used as KMS with MinIO, make sure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If MinIO is used together with etcd, make sure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md

## Explore Further

- [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [`mc` Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)
- [MinIO documentation](https://docs.min.io)

## Contribute to the MinIO Project

Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese developers are welcome to join the MinIO project.

## License

Use of MinIO is governed by the Apache 2.0 License, which you can view at [LICENSE](./LICENSE).
39 VULNERABILITY_REPORT.md (new file)
|
||||
## Vulnerability Management Policy
|
||||
|
||||
This document formally describes the process of addressing and managing a
|
||||
reported vulnerability that has been found in the MinIO server code base,
|
||||
any directly connected ecosystem component or a direct / indirect dependency
|
||||
of the code base.
|
||||
|
||||
### Scope
|
||||
|
||||
The vulnerability management policy described in this document covers the
|
||||
process of investigating, assessing and resolving a vulnerability report
|
||||
opened by a MinIO employee or an external third party.
|
||||
|
||||
Therefore, it lists pre-conditions and actions that should be performed to
|
||||
resolve and fix a reported vulnerability.
|
||||
|
||||
### Vulnerability Management Process
|
||||
|
||||
The vulnerability management process requires that the vulnerability report
|
||||
contains the following information:
|
||||
|
||||
- The project / component that contains the reported vulnerability.
|
||||
- A description of the vulnerability. In particular, the type of the
|
||||
reported vulnerability and how it might be exploited. Alternatively,
|
||||
a well-established vulnerability identifier, e.g. CVE number, can be
|
||||
used instead.
|
||||
|
||||
Based on the description mentioned above, a MinIO engineer or security team
|
||||
member investigates:
|
||||
|
||||
- Whether the reported vulnerability exists.
|
||||
- The conditions that are required such that the vulnerability can be exploited.
|
||||
- The steps required to fix the vulnerability.
|
||||
|
||||
In general, if the vulnerability exists in one of the MinIO code bases
|
||||
itself - not in a code dependency - then MinIO will, if possible, fix
|
||||
the vulnerability or implement reasonable countermeasures such that the
|
||||
vulnerability cannot be exploited anymore.
|
||||
|
||||
@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
|
||||
const { fetchServerInfo } = this.props
|
||||
fetchServerInfo()
|
||||
}
|
||||
fullScreen(e) {
|
||||
e.preventDefault()
|
||||
let el = document.documentElement
|
||||
if (el.requestFullscreen) {
|
||||
el.requestFullscreen()
|
||||
}
|
||||
if (el.mozRequestFullScreen) {
|
||||
el.mozRequestFullScreen()
|
||||
}
|
||||
if (el.webkitRequestFullscreen) {
|
||||
el.webkitRequestFullscreen()
|
||||
}
|
||||
if (el.msRequestFullscreen) {
|
||||
el.msRequestFullscreen()
|
||||
}
|
||||
}
|
||||
logout(e) {
|
||||
e.preventDefault()
|
||||
web.Logout()
|
||||
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
|
||||
<i className="fas fa-bars" />
|
||||
</Dropdown.Toggle>
|
||||
<Dropdown.Menu className="dropdown-menu-right">
|
||||
<li>
|
||||
<a href="" onClick={this.showChangePassword.bind(this)}>
|
||||
Change Password <i className="fas fa-cog" />
|
||||
</a>
|
||||
{this.state.showChangePasswordModal && (
|
||||
<ChangePasswordModal
|
||||
serverInfo={serverInfo}
|
||||
hideChangePassword={this.hideChangePassword.bind(this)}
|
||||
/>
|
||||
)}
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://docs.min.io/?ref=ob">
|
||||
Documentation <i className="fas fa-book" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://github.com/minio/minio">
|
||||
GitHub <i className="fab fa-github" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="" onClick={this.fullScreen}>
|
||||
Fullscreen <i className="fas fa-expand" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://docs.min.io/">
|
||||
Documentation <i className="fas fa-book" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://slack.min.io">
|
||||
Ask for help <i className="fas fa-question-circle" />
|
||||
<a target="_blank" href="https://min.io/pricing?ref=ob">
|
||||
Get Support <i className="fas fa-question-circle" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
|
||||
/>
|
||||
)}
|
||||
</li>
|
||||
<li>
|
||||
<a href="" onClick={this.showChangePassword.bind(this)}>
|
||||
Change Password <i className="fas fa-cog" />
|
||||
</a>
|
||||
{this.state.showChangePasswordModal && (
|
||||
<ChangePasswordModal
|
||||
serverInfo={serverInfo}
|
||||
hideChangePassword={this.hideChangePassword.bind(this)}
|
||||
/>
|
||||
)}
|
||||
</li>
|
||||
<li>
|
||||
<a href="" id="logout" onClick={this.logout}>
|
||||
Sign Out <i className="fas fa-sign-out-alt" />
|
||||
Logout <i className="fas fa-sign-out-alt" />
|
||||
</a>
|
||||
</li>
|
||||
</Dropdown.Menu>
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import ObjectsSearch from "../objects/ObjectsSearch"
|
||||
import Path from "../objects/Path"
|
||||
import StorageInfo from "./StorageInfo"
|
||||
import BrowserDropdown from "./BrowserDropdown"
|
||||
@@ -27,6 +28,7 @@ export const Header = () => {
|
||||
<header className="fe-header">
|
||||
<Path />
|
||||
{loggedIn && <StorageInfo />}
|
||||
{loggedIn && <ObjectsSearch />}
|
||||
<ul className="feh-actions">
|
||||
{loggedIn ? (
|
||||
<BrowserDropdown />
|
||||
|
||||
@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
|
||||
export const getFilteredBuckets = createSelector(
|
||||
bucketsSelector,
|
||||
bucketsFilterSelector,
|
||||
(buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
|
||||
(buckets, filter) => buckets.filter(
|
||||
bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
|
||||
)
|
||||
|
||||
export const getCurrentBucket = state => state.buckets.currentBucket
|
||||
|
||||
@@ -18,6 +18,7 @@ import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import InfiniteScroll from "react-infinite-scroller"
|
||||
import ObjectsList from "./ObjectsList"
|
||||
import { getFilteredObjects } from "./selectors"
|
||||
|
||||
export class ObjectsListContainer extends React.Component {
|
||||
constructor(props) {
|
||||
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
|
||||
})
|
||||
}
|
||||
}
|
||||
componentDidUpdate(prevProps) {
|
||||
if (this.props.filter !== prevProps.filter) {
|
||||
this.setState({
|
||||
page: 1
|
||||
})
|
||||
}
|
||||
}
|
||||
loadNextPage() {
|
||||
this.setState(state => {
|
||||
return { page: state.page + 1 }
|
||||
})
|
||||
}
|
||||
render() {
|
||||
const { objects, listLoading } = this.props
|
||||
const { filteredObjects, listLoading } = this.props
|
||||
|
||||
const visibleObjects = objects.slice(0, this.state.page * 100)
|
||||
const visibleObjects = filteredObjects.slice(0, this.state.page * 100)
|
||||
|
||||
return (
|
||||
<div style={{ position: "relative" }}>
|
||||
<InfiniteScroll
|
||||
pageStart={0}
|
||||
loadMore={this.loadNextPage}
|
||||
hasMore={objects.length > visibleObjects.length}
|
||||
hasMore={filteredObjects.length > visibleObjects.length}
|
||||
useWindow={true}
|
||||
initialLoad={false}
|
||||
>
|
||||
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
|
||||
return {
|
||||
currentBucket: state.buckets.currentBucket,
|
||||
currentPrefix: state.objects.currentPrefix,
|
||||
objects: state.objects.list,
|
||||
filteredObjects: getFilteredObjects(state),
|
||||
filter: state.objects.filter,
|
||||
sortBy: state.objects.sortBy,
|
||||
sortOrder: state.objects.sortOrder,
|
||||
listLoading: state.objects.listLoading
|
||||
|
||||
43 browser/app/js/objects/ObjectsSearch.js (new file)
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import * as actionsObjects from "./actions"
|
||||
|
||||
export const ObjectsSearch = ({ onChange }) => (
|
||||
<div
|
||||
className="input-group ig-left ig-search-dark"
|
||||
style={{ display: "block" }}
|
||||
>
|
||||
<input
|
||||
className="ig-text"
|
||||
type="input"
|
||||
placeholder="Search Objects..."
|
||||
onChange={e => onChange(e.target.value)}
|
||||
/>
|
||||
<i className="ig-helpers" />
|
||||
</div>
|
||||
)
|
||||
|
||||
const mapDispatchToProps = dispatch => {
|
||||
return {
|
||||
onChange: filter =>
|
||||
dispatch(actionsObjects.setFilter(filter))
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
|
||||
@@ -46,11 +46,11 @@ class PreviewObjectModal extends React.Component {
|
||||
<ModalBody>
|
||||
<div className="input-group">
|
||||
{this.state.url && (
|
||||
<img
|
||||
alt="Image broken"
|
||||
src={this.state.url}
|
||||
style={{ display: "block", width: "100%" }}
|
||||
/>
|
||||
<object data={this.state.url} style={{ display: "block", width: "100%" }}>
|
||||
<h3 style={{ textAlign: "center", display: "block", width: "100%" }}>
|
||||
Do not have read permissions to preview "{this.props.object.name}"
|
||||
</h3>
|
||||
</object>
|
||||
)}
|
||||
</div>
|
||||
</ModalBody>
|
||||
|
||||
@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"
|
||||
|
||||
describe("ObjectsList", () => {
|
||||
it("should render without crashing", () => {
|
||||
shallow(<ObjectsListContainer objects={[]} />)
|
||||
shallow(<ObjectsListContainer filteredObjects={[]} />)
|
||||
})
|
||||
|
||||
it("should render ObjectsList with objects", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectsListContainer
|
||||
objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
|
||||
filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
|
||||
/>
|
||||
)
|
||||
expect(wrapper.find("ObjectsList").length).toBe(1)
|
||||
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectsListContainer
|
||||
currentBucket="test1"
|
||||
objects={[]}
|
||||
filteredObjects={[]}
|
||||
listLoading={true}
|
||||
/>
|
||||
)
|
||||
|
||||
32 browser/app/js/objects/__tests__/ObjectsSearch.test.js (new file)
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { shallow } from "enzyme"
|
||||
import { ObjectsSearch } from "../ObjectsSearch"
|
||||
|
||||
describe("ObjectsSearch", () => {
|
||||
it("should render without crashing", () => {
|
||||
shallow(<ObjectsSearch />)
|
||||
})
|
||||
|
||||
it("should call onChange with search text", () => {
|
||||
const onChange = jest.fn()
|
||||
const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
|
||||
wrapper.find("input").simulate("change", { target: { value: "test" } })
|
||||
expect(onChange).toHaveBeenCalledWith("test")
|
||||
})
|
||||
})
|
||||
@@ -23,6 +23,7 @@ describe("objects reducer", () => {
|
||||
const initialState = reducer(undefined, {})
|
||||
expect(initialState).toEqual({
|
||||
list: [],
|
||||
filter: "",
|
||||
listLoading: false,
|
||||
sortBy: "",
|
||||
sortOrder: SORT_ORDER_ASC,
|
||||
|
||||
@@ -36,6 +36,7 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'
|
||||
|
||||
export const SET_LIST = "objects/SET_LIST"
|
||||
export const RESET_LIST = "objects/RESET_LIST"
|
||||
export const SET_FILTER = "objects/SET_FILTER"
|
||||
export const APPEND_LIST = "objects/APPEND_LIST"
|
||||
export const REMOVE = "objects/REMOVE"
|
||||
export const SET_SORT_BY = "objects/SET_SORT_BY"
|
||||
@@ -57,6 +58,13 @@ export const resetList = () => ({
|
||||
type: RESET_LIST,
|
||||
})
|
||||
|
||||
export const setFilter = filter => {
|
||||
return {
|
||||
type: SET_FILTER,
|
||||
filter
|
||||
}
|
||||
}
|
||||
|
||||
export const setListLoading = (listLoading) => ({
|
||||
type: SET_LIST_LOADING,
|
||||
listLoading,
|
||||
|
||||
@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
|
||||
export default (
|
||||
state = {
|
||||
list: [],
|
||||
filter: "",
|
||||
listLoading: false,
|
||||
sortBy: "",
|
||||
sortOrder: SORT_ORDER_ASC,
|
||||
@@ -53,6 +54,11 @@ export default (
|
||||
...state,
|
||||
list: []
|
||||
}
|
||||
case actionsObjects.SET_FILTER:
|
||||
return {
|
||||
...state,
|
||||
filter: action.filter
|
||||
}
|
||||
case actionsObjects.SET_LIST_LOADING:
|
||||
return {
|
||||
...state,
|
||||
|
||||
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
|
||||
export const getCheckedList = state => state.objects.checkedList
|
||||
|
||||
export const getPrefixWritable = state => state.objects.prefixWritable
|
||||
|
||||
const objectsSelector = state => state.objects.list
|
||||
const objectsFilterSelector = state => state.objects.filter
|
||||
|
||||
export const getFilteredObjects = createSelector(
|
||||
objectsSelector,
|
||||
objectsFilterSelector,
|
||||
(objects, filter) => objects.filter(
|
||||
object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
|
||||
)
|
||||
@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
|
||||
// Overwrite the default styling from react-dropzone; otherwise it
|
||||
// won't handle child elements correctly.
|
||||
const style = {
|
||||
height: "100%",
|
||||
flex: "1",
|
||||
borderWidth: "0",
|
||||
borderStyle: "dashed",
|
||||
borderColor: "#fff"
|
||||
|
||||
@@ -138,9 +138,10 @@ describe("Uploads actions", () => {
|
||||
objects: { currentPrefix: "pre1/" }
|
||||
})
|
||||
store.dispatch(uploadsActions.uploadFile(file))
|
||||
const objectPath = encodeURIComponent("pre1/file1")
|
||||
expect(open).toHaveBeenCalledWith(
|
||||
"PUT",
|
||||
"https://localhost:8080/upload/test1/pre1/file1",
|
||||
"https://localhost:8080/upload/test1/" + objectPath,
|
||||
true
|
||||
)
|
||||
expect(send).toHaveBeenCalledWith(file)
|
||||
|
||||
@@ -94,7 +94,7 @@ export const uploadFile = file => {
|
||||
_filePath = _filePath.substring(1)
|
||||
}
|
||||
const filePath = _filePath
|
||||
const objectName = `${currentPrefix}${filePath}`
|
||||
const objectName = encodeURIComponent(`${currentPrefix}${filePath}`)
|
||||
const uploadUrl = `${
|
||||
window.location.origin
|
||||
}${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`
|
||||
|
||||
@@ -21,7 +21,7 @@ import storage from 'local-storage-fallback'
|
||||
|
||||
class Web {
|
||||
constructor(endpoint) {
|
||||
const namespace = 'Web'
|
||||
const namespace = 'web'
|
||||
this.JSONrpc = new JSONrpc({
|
||||
endpoint,
|
||||
namespace
|
||||
|
||||
@@ -20,7 +20,8 @@
|
||||
@media(max-width: @screen-sm-max) {
|
||||
padding: 75px 0 80px;
|
||||
}
|
||||
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
min-height:100vh;
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
@@ -169,6 +169,24 @@ select.form-control {
|
||||
}
|
||||
}
|
||||
|
||||
.ig-search-dark {
|
||||
&:before {
|
||||
font-family: @font-family-icon;
|
||||
font-weight: 900;
|
||||
content: '\f002';
|
||||
font-size: 15px;
|
||||
position: absolute;
|
||||
left: 2px;
|
||||
top: 8px;
|
||||
color: rgba(0, 0, 0, 0.5);
|
||||
}
|
||||
|
||||
.ig-text {
|
||||
padding-left: 25px;
|
||||
.placeholder(rgba(0, 0, 0, 0.5))
|
||||
}
|
||||
}
|
||||
|
||||
.ig-search {
|
||||
&:before {
|
||||
font-family: @font-family-icon;
|
||||
@@ -270,4 +288,4 @@ select.form-control {
|
||||
.set-expire-decrease {
|
||||
bottom: -27px;
|
||||
.rotate(-180deg);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -349,6 +349,7 @@ div.fesl-row {
|
||||
margin: 0;
|
||||
height: 100%;
|
||||
text-align: right;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.dropdown {
|
||||
@@ -495,4 +496,4 @@ div.fesl-row {
|
||||
.opacity(1);
|
||||
right: 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
546 browser/package-lock.json (generated)
@@ -2271,7 +2271,9 @@
|
||||
"asap": {
|
||||
"version": "2.0.6",
|
||||
"resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
|
||||
"integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY="
|
||||
"integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=",
|
||||
"dev": true,
|
||||
"optional": true
|
||||
},
|
||||
"asn1": {
|
||||
"version": "0.2.4",
|
||||
@@ -2283,19 +2285,20 @@
|
||||
}
|
||||
},
|
||||
"asn1.js": {
|
||||
"version": "4.10.1",
|
||||
"resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz",
|
||||
"integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==",
|
||||
"version": "5.4.1",
|
||||
"resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz",
|
||||
"integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==",
|
||||
"requires": {
|
||||
"bn.js": "^4.0.0",
|
||||
"inherits": "^2.0.1",
|
||||
"minimalistic-assert": "^1.0.0"
|
||||
"minimalistic-assert": "^1.0.0",
|
||||
"safer-buffer": "^2.1.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -3541,9 +3544,9 @@
|
||||
}
|
||||
},
|
||||
"base64-js": {
|
||||
"version": "1.3.1",
|
||||
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz",
|
||||
"integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g=="
|
||||
"version": "1.5.1",
|
||||
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
||||
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="
|
||||
},
|
||||
"batch": {
|
||||
"version": "0.6.1",
|
||||
@@ -3574,13 +3577,12 @@
|
||||
"bluebird": {
|
||||
"version": "3.7.2",
|
||||
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
|
||||
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==",
|
||||
"dev": true
|
||||
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
|
||||
},
|
||||
"bn.js": {
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.1.tgz",
|
||||
"integrity": "sha512-IUTD/REb78Z2eodka1QZyyEk66pciRcP6Sroka0aI3tG/iwIdYLrBD62RsubR7vqdt3WyX8p4jxeatzmRSphtA=="
|
||||
"version": "5.1.3",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.3.tgz",
|
||||
"integrity": "sha512-GkTiFpjFtUzU9CbMeJ5iazkCzGL3jrhzerzZIuqLABjbwRaFt33I9tUdSNryIptM+RxDet6OKm2WnLXzW51KsQ=="
|
||||
},
|
||||
"body-parser": {
|
||||
"version": "1.19.0",
|
||||
@@ -3754,31 +3756,24 @@
|
||||
}
|
||||
},
|
||||
"browserify-rsa": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz",
|
||||
"integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=",
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz",
|
||||
"integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==",
|
||||
"requires": {
|
||||
"bn.js": "^4.1.0",
|
||||
"bn.js": "^5.0.0",
|
||||
"randombytes": "^2.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"browserify-sign": {
|
||||
"version": "4.2.0",
|
||||
"resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.0.tgz",
|
||||
"integrity": "sha512-hEZC1KEeYuoHRqhGhTy6gWrpJA3ZDjFWv0DE61643ZnOXAKJb3u7yWcrU0mMc9SwAqK1n7myPGndkp0dFG7NFA==",
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz",
|
||||
"integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==",
|
||||
"requires": {
|
||||
"bn.js": "^5.1.1",
|
||||
"browserify-rsa": "^4.0.1",
|
||||
"create-hash": "^1.2.0",
|
||||
"create-hmac": "^1.1.7",
|
||||
"elliptic": "^6.5.2",
|
||||
"elliptic": "^6.5.3",
|
||||
"inherits": "^2.0.4",
|
||||
"parse-asn1": "^5.1.5",
|
||||
"readable-stream": "^3.6.0",
|
||||
@@ -4022,9 +4017,9 @@
|
||||
}
|
||||
},
|
||||
"chokidar": {
|
||||
"version": "3.4.0",
|
||||
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.0.tgz",
|
||||
"integrity": "sha512-aXAaho2VJtisB/1fg1+3nlLJqGOuewTzQpd/Tz0yTg2R0e4IGtshYvtjowyEumcBv2z+y4+kc75Mz7j5xJskcQ==",
|
||||
"version": "3.4.3",
|
||||
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.3.tgz",
|
||||
"integrity": "sha512-DtM3g7juCXQxFVSNPNByEC2+NImtBuxQQvWlHunpJIS5Ocr0lG306cC7FCi7cEA0fzmybPUIl4txBIobk1gGOQ==",
|
||||
"optional": true,
|
||||
"requires": {
|
||||
"anymatch": "~3.1.1",
|
||||
@@ -4034,7 +4029,7 @@
|
||||
"is-binary-path": "~2.1.0",
|
||||
"is-glob": "~4.0.1",
|
||||
"normalize-path": "~3.0.0",
|
||||
"readdirp": "~3.4.0"
|
||||
"readdirp": "~3.5.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"anymatch": {
|
||||
@@ -4048,9 +4043,9 @@
|
||||
}
|
||||
},
|
||||
"binary-extensions": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.0.0.tgz",
|
||||
"integrity": "sha512-Phlt0plgpIIBOGTT/ehfFnbNlfsDEiqmzE2KRXoX1bLIlir4X/MR+zSyBEkL05ffWgnRSf/DXv+WrUAVr93/ow==",
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.1.0.tgz",
|
||||
"integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==",
|
||||
"optional": true
|
||||
},
|
||||
"braces": {
|
||||
@@ -4123,9 +4118,9 @@
|
||||
"optional": true
|
||||
},
|
||||
"readdirp": {
|
||||
"version": "3.4.0",
|
||||
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.4.0.tgz",
|
||||
"integrity": "sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ==",
|
||||
"version": "3.5.0",
|
||||
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
|
||||
"integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
|
||||
"optional": true,
|
||||
"requires": {
|
||||
"picomatch": "^2.2.1"
|
||||
@@ -4143,9 +4138,9 @@
|
||||
}
|
||||
},
|
||||
"chownr": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.1.tgz",
|
||||
"integrity": "sha512-j38EvO5+LHX84jlo6h4UzmOwi0UgW61WRyPtJz4qaadK5eY3BTS5TY/S1Stc3Uk2lIM6TPevAlULiEJwie860g=="
|
||||
"version": "1.1.4",
|
||||
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
|
||||
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
|
||||
},
|
||||
"chrome-trace-event": {
|
||||
"version": "1.0.2",
|
||||
@@ -4713,18 +4708,18 @@
|
||||
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
|
||||
},
|
||||
"create-ecdh": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz",
|
||||
"integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz",
|
||||
"integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==",
|
||||
"requires": {
|
||||
"bn.js": "^4.1.0",
|
||||
"elliptic": "^6.0.0"
|
||||
"elliptic": "^6.5.3"
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -4925,9 +4920,9 @@
|
||||
}
|
||||
},
|
||||
"cyclist": {
|
||||
"version": "0.2.2",
|
||||
"resolved": "https://registry.npmjs.org/cyclist/-/cyclist-0.2.2.tgz",
|
||||
"integrity": "sha1-GzN5LhHpFKL9bW7WRHRkRE5fpkA="
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz",
|
||||
"integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk="
|
||||
},
|
||||
"dashdash": {
|
||||
"version": "1.14.1",
|
||||
@@ -5242,9 +5237,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5420,9 +5415,9 @@
|
||||
}
|
||||
},
|
||||
"duplexify": {
|
||||
"version": "3.6.1",
|
||||
"resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.6.1.tgz",
|
||||
"integrity": "sha512-vM58DwdnKmty+FSPzT14K9JXb90H+j5emaR4KYbr2KTIz00WHGbWOe5ghQTx233ZCLZtrGDALzKwcjEtSt35mA==",
|
||||
"version": "3.7.1",
|
||||
"resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz",
|
||||
"integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==",
|
||||
"requires": {
|
||||
"end-of-stream": "^1.0.0",
|
||||
"inherits": "^2.0.1",
|
||||
@@ -5471,9 +5466,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"elliptic": {
|
||||
"version": "6.5.2",
|
||||
"resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.2.tgz",
|
||||
"integrity": "sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw==",
|
||||
"version": "6.5.3",
|
||||
"resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz",
|
||||
"integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==",
|
||||
"requires": {
|
||||
"bn.js": "^4.4.0",
|
||||
"brorand": "^1.0.1",
|
||||
@@ -5485,9 +5480,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5509,14 +5504,6 @@
|
||||
"integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=",
|
||||
"dev": true
|
||||
},
|
||||
"encoding": {
|
||||
"version": "0.1.12",
|
||||
"resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.12.tgz",
|
||||
"integrity": "sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=",
|
||||
"requires": {
|
||||
"iconv-lite": "~0.4.13"
|
||||
}
|
||||
},
|
||||
"end-of-stream": {
|
||||
"version": "1.4.1",
|
||||
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.1.tgz",
|
||||
@@ -5526,9 +5513,9 @@
|
||||
}
|
||||
},
|
||||
"enhanced-resolve": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz",
|
||||
"integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==",
|
||||
"version": "4.3.0",
|
||||
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.3.0.tgz",
|
||||
"integrity": "sha512-3e87LvavsdxyoCfGusJnrZ5G8SLPOFeHSNpZI/ATL9a5leXo2k0w6MKnbqhdBad9qTobSfB20Ld7UmgoNbAZkQ==",
|
||||
"requires": {
|
||||
"graceful-fs": "^4.1.2",
|
||||
"memory-fs": "^0.5.0",
|
||||
@@ -5886,9 +5873,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": {
|
||||
"version": "4.17.15",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
|
||||
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
|
||||
"version": "4.17.20",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
|
||||
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
|
||||
"dev": true
|
||||
},
|
||||
"react-is": {
|
||||
@@ -6162,11 +6149,18 @@
|
||||
"dev": true
|
||||
},
|
||||
"esrecurse": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz",
|
||||
"integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==",
|
||||
"version": "4.3.0",
|
||||
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
|
||||
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
|
||||
"requires": {
|
||||
"estraverse": "^4.1.0"
|
||||
"estraverse": "^5.2.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"estraverse": {
|
||||
"version": "5.2.0",
|
||||
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
|
||||
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"estraverse": {
|
||||
@@ -6193,9 +6187,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"events": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/events/-/events-3.1.0.tgz",
|
||||
"integrity": "sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg=="
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/events/-/events-3.2.0.tgz",
|
||||
"integrity": "sha512-/46HWwbfCX2xTawVfkKLGxMifJYQBWMwY1mjywRtb4c9x8l5NP3KoJtnIOiL1hfdRkIuYhETxQlo62IF8tcnlg=="
|
||||
},
|
||||
"eventsource": {
|
||||
"version": "1.0.7",
|
||||
@@ -6722,27 +6716,6 @@
|
||||
"bser": "2.1.1"
|
||||
}
|
||||
},
|
||||
"fbjs": {
|
||||
"version": "0.8.16",
|
||||
"resolved": "https://registry.npmjs.org/fbjs/-/fbjs-0.8.16.tgz",
|
||||
"integrity": "sha1-XmdDL1UNxBtXK/VYR7ispk5TN9s=",
|
||||
"requires": {
|
||||
"core-js": "^1.0.0",
|
||||
"isomorphic-fetch": "^2.1.1",
|
||||
"loose-envify": "^1.0.0",
|
||||
"object-assign": "^4.1.0",
|
||||
"promise": "^7.1.1",
|
||||
"setimmediate": "^1.0.5",
|
||||
"ua-parser-js": "^0.7.9"
|
||||
},
|
||||
"dependencies": {
|
||||
"core-js": {
|
||||
"version": "1.2.7",
|
||||
"resolved": "https://registry.npmjs.org/core-js/-/core-js-1.2.7.tgz",
|
||||
"integrity": "sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY="
|
||||
}
|
||||
}
|
||||
},
|
||||
"figgy-pudding": {
|
||||
"version": "3.5.2",
|
||||
"resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz",
|
||||
@@ -7013,12 +6986,12 @@
|
||||
}
|
||||
},
|
||||
"flush-write-stream": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.0.3.tgz",
|
||||
"integrity": "sha512-calZMC10u0FMUqoiunI2AiGIIUtUIvifNwkHhNupZH4cbNnW1Itkoh/Nf5HFYmDrwWPjrUxpkZT0KhuCq0jmGw==",
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz",
|
||||
"integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==",
|
||||
"requires": {
|
||||
"inherits": "^2.0.1",
|
||||
"readable-stream": "^2.0.4"
|
||||
"inherits": "^2.0.3",
|
||||
"readable-stream": "^2.3.6"
|
||||
}
|
||||
},
|
||||
"follow-redirects": {
|
||||
@@ -8376,9 +8349,9 @@
|
||||
}
|
||||
},
|
||||
"lodash": {
|
||||
"version": "4.17.15",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
|
||||
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
|
||||
"version": "4.17.20",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
|
||||
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
|
||||
"dev": true
|
||||
},
|
||||
"tapable": {
|
||||
@@ -8651,14 +8624,6 @@
|
||||
"resolved": "https://registry.npmjs.org/humanize/-/humanize-0.0.9.tgz",
|
||||
"integrity": "sha1-GZT/rs3+nEQe0r2sdFK3u0yeQaQ="
|
||||
},
|
||||
"iconv-lite": {
|
||||
"version": "0.4.23",
|
||||
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
|
||||
"integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
|
||||
"requires": {
|
||||
"safer-buffer": ">= 2.1.2 < 3"
|
||||
}
|
||||
},
|
||||
"icss-utils": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz",
|
||||
@@ -8677,9 +8642,9 @@
|
||||
}
|
||||
},
|
||||
"ieee754": {
|
||||
"version": "1.1.13",
|
||||
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz",
|
||||
"integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg=="
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
|
||||
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="
|
||||
},
|
||||
"iferr": {
|
||||
"version": "0.1.5",
|
||||
@@ -8746,9 +8711,9 @@
|
||||
"integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
|
||||
},
|
||||
"ini": {
|
||||
"version": "1.3.5",
|
||||
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz",
|
||||
"integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==",
|
||||
"version": "1.3.8",
|
||||
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
|
||||
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
|
||||
"dev": true
|
||||
},
|
||||
"internal-ip": {
|
||||
@@ -9013,7 +8978,8 @@
|
||||
"is-stream": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
|
||||
"integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ="
|
||||
"integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=",
|
||||
"dev": true
|
||||
},
|
||||
"is-subset": {
|
||||
"version": "0.1.1",
|
||||
@@ -9068,15 +9034,6 @@
|
||||
"resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
|
||||
"integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8="
|
||||
},
|
||||
"isomorphic-fetch": {
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz",
|
||||
"integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=",
|
||||
"requires": {
|
||||
"node-fetch": "^1.0.1",
|
||||
"whatwg-fetch": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"isstream": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
|
||||
@@ -10573,9 +10530,9 @@
|
||||
}
|
||||
},
|
||||
"lodash": {
|
||||
"version": "4.17.14",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.14.tgz",
|
||||
"integrity": "sha512-mmKYbW3GLuJeX+iGP+Y7Gp1AiGHGbXHCOh/jZmrawMmsE7MS4znI3RL2FsjbqOyMayHInjOeykW7PEajUk1/xw==",
|
||||
"version": "4.17.20",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
|
||||
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
|
||||
"dev": true
|
||||
},
|
||||
"lodash._baseisequal": {
|
||||
@@ -10682,6 +10639,14 @@
|
||||
"tslib": "^1.10.0"
|
||||
}
|
||||
},
|
||||
"lru-cache": {
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
|
||||
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
|
||||
"requires": {
|
||||
"yallist": "^3.0.2"
|
||||
}
|
||||
},
|
||||
"make-dir": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz",
|
||||
@@ -10868,9 +10833,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -10993,6 +10958,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"mississippi": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
|
||||
"integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
|
||||
"requires": {
|
||||
"concat-stream": "^1.5.0",
|
||||
"duplexify": "^3.4.2",
|
||||
"end-of-stream": "^1.1.0",
|
||||
"flush-write-stream": "^1.0.0",
|
||||
"from2": "^2.1.0",
|
||||
"parallel-transform": "^1.1.0",
|
||||
"pump": "^3.0.0",
|
||||
"pumpify": "^1.3.3",
|
||||
"stream-each": "^1.1.0",
|
||||
"through2": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"mixin-deep": {
|
||||
"version": "1.3.2",
|
||||
"resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
|
||||
@@ -11165,19 +11147,10 @@
|
||||
"tslib": "^1.10.0"
|
||||
}
|
||||
},
|
||||
"node-fetch": {
|
||||
"version": "1.6.3",
|
||||
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-1.6.3.tgz",
|
||||
"integrity": "sha1-3CNO3WSJmC1Y6PDbT2lQKavNjAQ=",
|
||||
"requires": {
|
||||
"encoding": "^0.1.11",
|
||||
"is-stream": "^1.0.1"
|
||||
}
|
||||
},
|
||||
"node-forge": {
|
||||
"version": "0.9.0",
|
||||
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.9.0.tgz",
|
||||
"integrity": "sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ==",
|
||||
"version": "0.10.0",
|
||||
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz",
|
||||
"integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==",
|
||||
"dev": true
|
||||
},
|
||||
"node-int64": {
|
||||
@@ -11768,11 +11741,11 @@
|
||||
"integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw=="
|
||||
},
|
||||
"parallel-transform": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.1.0.tgz",
|
||||
"integrity": "sha1-1BDwZbBdojCB/NEPKIVMKb2jOwY=",
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz",
|
||||
"integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==",
|
||||
"requires": {
|
||||
"cyclist": "~0.2.2",
|
||||
"cyclist": "^1.0.1",
|
||||
"inherits": "^2.0.3",
|
||||
"readable-stream": "^2.1.5"
|
||||
}
|
||||
@@ -11788,13 +11761,12 @@
|
||||
}
|
||||
},
|
||||
"parse-asn1": {
|
||||
"version": "5.1.5",
|
||||
"resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz",
|
||||
"integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==",
|
||||
"version": "5.1.6",
|
||||
"resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz",
|
||||
"integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==",
|
||||
"requires": {
|
||||
"asn1.js": "^4.0.0",
|
||||
"asn1.js": "^5.2.0",
|
||||
"browserify-aes": "^1.0.0",
|
||||
"create-hash": "^1.1.0",
|
||||
"evp_bytestokey": "^1.0.0",
|
||||
"pbkdf2": "^3.0.3",
|
||||
"safe-buffer": "^5.1.1"
|
||||
@@ -11913,9 +11885,9 @@
|
||||
}
|
||||
},
|
||||
"pbkdf2": {
|
||||
"version": "3.0.17",
|
||||
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz",
|
||||
"integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==",
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.1.tgz",
|
||||
"integrity": "sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg==",
|
||||
"requires": {
|
||||
"create-hash": "^1.1.2",
|
||||
"create-hmac": "^1.1.4",
|
||||
@@ -12218,6 +12190,8 @@
|
||||
"version": "7.3.1",
|
||||
"resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz",
|
||||
"integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==",
|
||||
"dev": true,
|
||||
"optional": true,
|
||||
"requires": {
|
||||
"asap": "~2.0.3"
|
||||
}
|
||||
@@ -12238,13 +12212,23 @@
|
||||
}
|
||||
},
|
||||
"prop-types": {
|
||||
"version": "15.6.0",
|
||||
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.6.0.tgz",
|
||||
"integrity": "sha1-zq8IMCL8RrSjX2nhPvda7Q1jmFY=",
|
||||
"version": "15.7.2",
|
||||
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
|
||||
"integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
|
||||
"requires": {
|
||||
"fbjs": "^0.8.16",
|
||||
"loose-envify": "^1.3.1",
|
||||
"object-assign": "^4.1.1"
|
||||
"loose-envify": "^1.4.0",
|
||||
"object-assign": "^4.1.1",
|
||||
"react-is": "^16.8.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"loose-envify": {
|
||||
"version": "1.4.0",
|
||||
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
|
||||
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
|
||||
"requires": {
|
||||
"js-tokens": "^3.0.0 || ^4.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"prop-types-exact": {
|
||||
@@ -12330,16 +12314,16 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"bn.js": {
|
||||
"version": "4.11.8",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
|
||||
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
|
||||
"version": "4.11.9",
|
||||
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
|
||||
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"pump": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
|
||||
"integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
|
||||
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
|
||||
"requires": {
|
||||
"end-of-stream": "^1.1.0",
|
||||
"once": "^1.3.1"
|
||||
@@ -12353,6 +12337,17 @@
|
||||
"duplexify": "^3.6.0",
|
||||
"inherits": "^2.0.3",
|
||||
"pump": "^2.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"pump": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
|
||||
"integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
|
||||
"requires": {
|
||||
"end-of-stream": "^1.1.0",
|
||||
"once": "^1.3.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"punycode": {
|
||||
@@ -13492,9 +13487,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": {
|
||||
"version": "4.17.15",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
|
||||
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
|
||||
"version": "4.17.20",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
|
||||
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
|
||||
"dev": true
|
||||
}
|
||||
}
|
||||
@@ -13958,12 +13953,12 @@
|
||||
"dev": true
|
||||
},
|
||||
"selfsigned": {
|
||||
"version": "1.10.7",
|
||||
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.7.tgz",
|
||||
"integrity": "sha512-8M3wBCzeWIJnQfl43IKwOmC4H/RAp50S8DF60znzjW5GVqTcSe2vWclt7hmYVPkKPlHWOu5EaWOMZ2Y6W8ZXTA==",
|
||||
"version": "1.10.8",
|
||||
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz",
|
||||
"integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"node-forge": "0.9.0"
|
||||
"node-forge": "^0.10.0"
|
||||
}
|
||||
},
|
||||
"semver": {
|
||||
@@ -14001,10 +13996,13 @@
|
||||
}
|
||||
},
|
||||
"serialize-javascript": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.0.0.tgz",
|
||||
"integrity": "sha512-skZcHYw2vEX4bw90nAr2iTTsz6x2SrHEnfxgKYmZlvJYBEZrvbKtobJWlQ20zczKb3bsHHXXTYt48zBA7ni9cw==",
|
||||
"dev": true
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz",
|
||||
"integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"randombytes": "^2.1.0"
|
||||
}
|
||||
},
|
||||
"serializerr": {
|
||||
"version": "1.0.3",
|
||||
@@ -14644,9 +14642,9 @@
|
||||
}
|
||||
},
|
||||
"stream-shift": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.0.tgz",
|
||||
"integrity": "sha1-1cdSgl5TZ+eG944Y5EXqIjoVWVI="
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz",
|
||||
"integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ=="
|
||||
},
|
||||
"strict-uri-encode": {
|
||||
"version": "2.0.0",
|
||||
@@ -15356,26 +15354,21 @@
|
||||
}
|
||||
},
|
||||
"terser-webpack-plugin": {
|
||||
"version": "1.4.3",
|
||||
"resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.3.tgz",
|
||||
"integrity": "sha512-QMxecFz/gHQwteWwSo5nTc6UaICqN1bMedC5sMtUc7y3Ha3Q8y6ZO0iCR8pq4RJC8Hjf0FEPEHZqcMB/+DFCrA==",
|
||||
"version": "1.4.5",
|
||||
"resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz",
|
||||
"integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==",
|
||||
"requires": {
|
||||
"cacache": "^12.0.2",
|
||||
"find-cache-dir": "^2.1.0",
|
||||
"is-wsl": "^1.1.0",
|
||||
"schema-utils": "^1.0.0",
|
||||
"serialize-javascript": "^2.1.2",
|
||||
"serialize-javascript": "^4.0.0",
|
||||
"source-map": "^0.6.1",
|
||||
"terser": "^4.1.2",
|
||||
"webpack-sources": "^1.4.0",
|
||||
"worker-farm": "^1.7.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"bluebird": {
|
||||
"version": "3.7.2",
|
||||
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
|
||||
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
|
||||
},
|
||||
"cacache": {
|
||||
"version": "12.0.4",
|
||||
"resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz",
|
||||
@@ -15443,14 +15436,6 @@
|
||||
"path-exists": "^3.0.0"
|
||||
}
|
||||
},
|
||||
"lru-cache": {
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
|
||||
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
|
||||
"requires": {
|
||||
"yallist": "^3.0.2"
|
||||
}
|
||||
},
|
||||
"make-dir": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
|
||||
@@ -15460,23 +15445,6 @@
|
||||
"semver": "^5.6.0"
|
||||
}
|
||||
},
|
||||
"mississippi": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
|
||||
"integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
|
||||
"requires": {
|
||||
"concat-stream": "^1.5.0",
|
||||
"duplexify": "^3.4.2",
|
||||
"end-of-stream": "^1.1.0",
|
||||
"flush-write-stream": "^1.0.0",
|
||||
"from2": "^2.1.0",
|
||||
"parallel-transform": "^1.1.0",
|
||||
"pump": "^3.0.0",
|
||||
"pumpify": "^1.3.3",
|
||||
"stream-each": "^1.1.0",
|
||||
"through2": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"p-limit": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
|
||||
@@ -15511,24 +15479,13 @@
|
||||
"find-up": "^3.0.0"
|
||||
}
|
||||
},
|
||||
"pump": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
|
||||
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
|
||||
"requires": {
|
||||
"end-of-stream": "^1.1.0",
|
||||
"once": "^1.3.1"
|
||||
}
|
||||
},
|
||||
"serialize-javascript": {
|
||||
"version": "2.1.2",
|
||||
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-2.1.2.tgz",
|
||||
"integrity": "sha512-rs9OggEUF0V4jUSecXazOYsLfu7OGK2qIn3c7IPBiffz32XniEp/TX9Xmc9LQfK2nQ2QKHvZ2oygKUGU0lG4jQ=="
|
||||
},
|
||||
"source-list-map": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
|
||||
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz",
|
||||
"integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==",
|
||||
"requires": {
|
||||
"randombytes": "^2.1.0"
|
||||
}
|
||||
},
|
||||
"source-map": {
|
||||
"version": "0.6.1",
|
||||
@@ -15542,15 +15499,6 @@
|
||||
"requires": {
|
||||
"figgy-pudding": "^3.5.1"
|
||||
}
|
||||
},
|
||||
"webpack-sources": {
|
||||
"version": "1.4.3",
|
||||
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
|
||||
"integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
|
||||
"requires": {
|
||||
"source-list-map": "^2.0.0",
|
||||
"source-map": "~0.6.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -15589,9 +15537,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"timers-browserify": {
|
||||
"version": "2.0.11",
|
||||
"resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz",
|
||||
"integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==",
|
||||
"version": "2.0.12",
|
||||
"resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz",
|
||||
"integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==",
|
||||
"requires": {
|
||||
"setimmediate": "^1.0.4"
|
||||
}
|
||||
@@ -15790,11 +15738,6 @@
|
||||
"resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
|
||||
"integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c="
|
||||
},
|
||||
"ua-parser-js": {
|
||||
"version": "0.7.20",
|
||||
"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.20.tgz",
|
||||
"integrity": "sha512-8OaIKfzL5cpx8eCMAhhvTlft8GYF8b2eQr6JkCyVdrgjcytyOmPCXrqXFcUnhonRpLlh5yxEZVohm6mzaowUOw=="
|
||||
},
|
||||
"uglify-js": {
|
||||
"version": "3.9.3",
|
||||
"resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.9.3.tgz",
|
||||
@@ -16172,20 +16115,20 @@
|
||||
}
|
||||
},
|
||||
"watchpack": {
|
||||
"version": "1.7.2",
|
||||
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.2.tgz",
|
||||
"integrity": "sha512-ymVbbQP40MFTp+cNMvpyBpBtygHnPzPkHqoIwRRj/0B8KhqQwV8LaKjtbaxF2lK4vl8zN9wCxS46IFCU5K4W0g==",
|
||||
"version": "1.7.5",
|
||||
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz",
|
||||
"integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==",
|
||||
"requires": {
|
||||
"chokidar": "^3.4.0",
|
||||
"chokidar": "^3.4.1",
|
||||
"graceful-fs": "^4.1.2",
|
||||
"neo-async": "^2.5.0",
|
||||
"watchpack-chokidar2": "^2.0.0"
|
||||
"watchpack-chokidar2": "^2.0.1"
|
||||
}
|
||||
},
|
||||
"watchpack-chokidar2": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.0.tgz",
|
||||
"integrity": "sha512-9TyfOyN/zLUbA288wZ8IsMZ+6cbzvsNyEzSBp6e/zkifi6xxbl8SmQ/CxQq32k8NNqrdVEVUVSEf56L4rQ/ZxA==",
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz",
|
||||
"integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==",
|
||||
"optional": true,
|
||||
"requires": {
|
||||
"chokidar": "^2.1.8"
|
||||
@@ -16447,9 +16390,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"webpack": {
|
||||
"version": "4.43.0",
|
||||
"resolved": "https://registry.npmjs.org/webpack/-/webpack-4.43.0.tgz",
|
||||
"integrity": "sha512-GW1LjnPipFW2Y78OOab8NJlCflB7EFskMih2AHdvjbpKMeDJqEgSx24cXXXiPS65+WSwVyxtDsJH6jGX2czy+g==",
|
||||
"version": "4.44.2",
|
||||
"resolved": "https://registry.npmjs.org/webpack/-/webpack-4.44.2.tgz",
|
||||
"integrity": "sha512-6KJVGlCxYdISyurpQ0IPTklv+DULv05rs2hseIXer6D7KrUicRDLFb4IUM1S6LUAKypPM/nSiVSuv8jHu1m3/Q==",
|
||||
"requires": {
|
||||
"@webassemblyjs/ast": "1.9.0",
|
||||
"@webassemblyjs/helper-module-context": "1.9.0",
|
||||
@@ -16459,7 +16402,7 @@
|
||||
"ajv": "^6.10.2",
|
||||
"ajv-keywords": "^3.4.1",
|
||||
"chrome-trace-event": "^1.0.2",
|
||||
"enhanced-resolve": "^4.1.0",
|
||||
"enhanced-resolve": "^4.3.0",
|
||||
"eslint-scope": "^4.0.3",
|
||||
"json-parse-better-errors": "^1.0.2",
|
||||
"loader-runner": "^2.4.0",
|
||||
@@ -16472,14 +16415,14 @@
|
||||
"schema-utils": "^1.0.0",
|
||||
"tapable": "^1.1.3",
|
||||
"terser-webpack-plugin": "^1.4.3",
|
||||
"watchpack": "^1.6.1",
|
||||
"watchpack": "^1.7.4",
|
||||
"webpack-sources": "^1.4.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"ajv": {
|
||||
"version": "6.12.2",
|
||||
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.2.tgz",
|
||||
"integrity": "sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ==",
|
||||
"version": "6.12.6",
|
||||
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
|
||||
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
|
||||
"requires": {
|
||||
"fast-deep-equal": "^3.1.1",
|
||||
"fast-json-stable-stringify": "^2.0.0",
|
||||
@@ -16578,9 +16521,9 @@
|
||||
}
|
||||
},
|
||||
"fast-deep-equal": {
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz",
|
||||
"integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA=="
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
|
||||
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
|
||||
},
|
||||
"is-accessor-descriptor": {
|
||||
"version": "0.1.6",
|
||||
@@ -16661,29 +16604,10 @@
|
||||
"to-regex": "^3.0.2"
|
||||
}
|
||||
},
|
||||
"source-list-map": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
|
||||
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
|
||||
},
|
||||
"source-map": {
|
||||
"version": "0.6.1",
|
||||
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
|
||||
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
|
||||
},
|
||||
"tapable": {
|
||||
"version": "1.1.3",
|
||||
"resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
|
||||
"integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA=="
|
||||
},
|
||||
"webpack-sources": {
|
||||
"version": "1.4.3",
|
||||
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
|
||||
"integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
|
||||
"requires": {
|
||||
"source-list-map": "^2.0.0",
|
||||
"source-map": "~0.6.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -17605,7 +17529,6 @@
|
||||
"version": "1.4.3",
|
||||
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
|
||||
"integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"source-list-map": "^2.0.0",
|
||||
"source-map": "~0.6.1"
|
||||
@@ -17614,14 +17537,12 @@
|
||||
"source-list-map": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
|
||||
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==",
|
||||
"dev": true
|
||||
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
|
||||
},
|
||||
"source-map": {
|
||||
"version": "0.6.1",
|
||||
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
|
||||
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
|
||||
"dev": true
|
||||
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -17660,11 +17581,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"whatwg-fetch": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz",
|
||||
"integrity": "sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q=="
|
||||
},
|
||||
"whatwg-mimetype": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz",
|
||||
@@ -17780,9 +17696,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"xtend": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz",
|
||||
"integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68="
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
|
||||
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
|
||||
},
|
||||
"y18n": {
|
||||
"version": "4.0.0",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -9,7 +9,7 @@ function _init() {
    export CGO_ENABLED=0

    ## List of architectures and OS to test cross compilation.
    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
}

function _build() {
@@ -1,69 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
set -E
|
||||
set -o pipefail
|
||||
|
||||
function start_minio_server()
|
||||
{
|
||||
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
|
||||
minio --quiet --json server /data --address 127.0.0.1:24242 > server.log 2>&1 &
|
||||
server_pid=$!
|
||||
sleep 10
|
||||
|
||||
echo "$server_pid"
|
||||
}
|
||||
|
||||
function start_minio_gateway_s3()
|
||||
{
|
||||
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
|
||||
minio --quiet --json gateway s3 http://127.0.0.1:24242 \
|
||||
--address 127.0.0.1:24240 > gateway.log 2>&1 &
|
||||
gw_pid=$!
|
||||
sleep 10
|
||||
|
||||
echo "$gw_pid"
|
||||
}
|
||||
|
||||
function main()
|
||||
{
|
||||
sr_pid="$(start_minio_server)"
|
||||
gw_pid="$(start_minio_gateway_s3)"
|
||||
|
||||
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
|
||||
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
|
||||
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
|
||||
healthcheck mc minio-dotnet minio-js \
|
||||
minio-py s3cmd s3select security
|
||||
rv=$?
|
||||
|
||||
kill "$sr_pid"
|
||||
kill "$gw_pid"
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
echo "=========== Gateway ==========="
|
||||
cat "gateway.log"
|
||||
echo "=========== Server ==========="
|
||||
cat "server.log"
|
||||
fi
|
||||
|
||||
rm -f gateway.log server.log
|
||||
}
|
||||
|
||||
main "$@"
|
||||
@@ -44,10 +44,21 @@ func releaseTag(version string) string {
        relPrefix = prefix
    }

    relSuffix := ""
    if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
        relSuffix = hotfix
    }

    relTag := strings.Replace(version, " ", "-", -1)
    relTag = strings.Replace(relTag, ":", "-", -1)
    relTag = strings.Replace(relTag, ",", "", -1)
    return relPrefix + "." + relTag
    relTag = relPrefix + "." + relTag

    if relSuffix != "" {
        relTag += "." + relSuffix
    }

    return relTag
}

// commitID returns the abbreviated commit-id hash of the last commit.
@@ -68,5 +79,12 @@ func commitID() string {
}

func main() {
    fmt.Println(genLDFlags(time.Now().UTC().Format(time.RFC3339)))
    var version string
    if len(os.Args) > 1 {
        version = os.Args[1]
    } else {
        version = time.Now().UTC().Format(time.RFC3339)
    }

    fmt.Println(genLDFlags(version))
}

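The hotfix flow above is easiest to see end to end. Below is a minimal standalone sketch, not the gen-ldflags tool itself: relPrefix resolution and genLDFlags are elided, and the default RELEASE prefix is assumed.

package main

import (
    "fmt"
    "os"
    "strings"
)

// tag mirrors the releaseTag logic in the diff above: sanitize the
// version string, prefix it, then append the optional hotfix suffix.
func tag(version string) string {
    relTag := strings.Replace(version, " ", "-", -1)
    relTag = strings.Replace(relTag, ":", "-", -1)
    relTag = strings.Replace(relTag, ",", "", -1)
    relTag = "RELEASE" + "." + relTag // assumes the default prefix
    if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
        relTag += "." + hotfix
    }
    return relTag
}

func main() {
    // MINIO_HOTFIX=hotfix.1 go run tag.go  =>  RELEASE.2020-10-10T00-00-00Z.hotfix.1
    fmt.Println(tag("2020-10-10T00:00:00Z"))
}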
@@ -3,5 +3,5 @@
set -e

for d in $(go list ./... | grep -v browser); do
    CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
    CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done

@@ -45,88 +45,64 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
    "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
    minio_pid=$!
    sleep 10

    echo "$minio_pid"
}

function start_minio_erasure()
{
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
    minio_pid=$!
    sleep 15

    echo "$minio_pid"
}

function start_minio_erasure_sets()
{
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
    minio_pid=$!
    export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
    "${MINIO[@]}" server > "$WORK_DIR/erasure-minio-sets.log" 2>&1 &
    sleep 15

    echo "$minio_pid"
}

function start_minio_zone_erasure_sets()
function start_minio_pool_erasure_sets()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY

    "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
    minio_pids[0]=$!

    "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
    minio_pids[1]=$!
    export MINIO_ROOT_USER=$ACCESS_KEY
    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
    "${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
    "${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &

    sleep 40
    echo "${minio_pids[@]}"
}

function start_minio_zone_erasure_sets_ipv6()
function start_minio_pool_erasure_sets_ipv6()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY

    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
    minio_pids[0]=$!

    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
    minio_pids[1]=$!
    export MINIO_ROOT_USER=$ACCESS_KEY
    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
    "${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
    "${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

    sleep 40
    echo "${minio_pids[@]}"
}

function start_minio_dist_erasure()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY
    "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
    minio_pids[0]=$!
    "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
    minio_pids[1]=$!
    "${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
    minio_pids[2]=$!
    "${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
    minio_pids[3]=$!
    export MINIO_ROOT_USER=$ACCESS_KEY
    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
    for i in $(seq 0 3); do
        "${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
    done

    sleep 40
    echo "${minio_pids[@]}"
}

function run_test_fs()
{
    minio_pid="$(start_minio_fs)"
    start_minio_fs

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    kill "$minio_pid"
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -137,13 +113,14 @@ function run_test_fs()
    return "$rv"
}

function run_test_erasure_sets() {
    minio_pid="$(start_minio_erasure_sets)"
function run_test_erasure_sets()
{
    start_minio_erasure_sets

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    kill "$minio_pid"
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -154,55 +131,51 @@ function run_test_erasure_sets() {
    return "$rv"
}

function run_test_zone_erasure_sets()
function run_test_pool_erasure_sets()
{
    minio_pids=( $(start_minio_zone_erasure_sets) )
    start_minio_pool_erasure_sets

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
        for i in $(seq 0 1); do
            echo "server$i log:"
            cat "$WORK_DIR/zone-minio-900$i.log"
            cat "$WORK_DIR/pool-minio-900$i.log"
        done
    fi

    for i in $(seq 0 1); do
        rm -f "$WORK_DIR/zone-minio-900$i.log"
        rm -f "$WORK_DIR/pool-minio-900$i.log"
    done

    return "$rv"
}

function run_test_zone_erasure_sets_ipv6()
function run_test_pool_erasure_sets_ipv6()
{
    minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
    start_minio_pool_erasure_sets_ipv6

    export SERVER_ENDPOINT="[::1]:9000"

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
        for i in $(seq 0 1); do
            echo "server$i log:"
            cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
            cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
        done
    fi

    for i in $(seq 0 1); do
        rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
        rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
    done

    return "$rv"
@@ -210,12 +183,12 @@ function run_test_zone_erasure_sets_ipv6()

function run_test_erasure()
{
    minio_pid="$(start_minio_erasure)"
    start_minio_erasure

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    kill "$minio_pid"
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -228,14 +201,12 @@

function run_test_dist_erasure()
{
    minio_pids=( $(start_minio_dist_erasure) )
    start_minio_dist_erasure

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -251,7 +222,7 @@ function run_test_dist_erasure()

    rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"

    return "$rv"
    return "$rv"
}

function purge()
@@ -324,14 +295,14 @@ function main()
    fi

    echo "Testing in Distributed Erasure expanded setup"
    if ! run_test_zone_erasure_sets; then
    if ! run_test_pool_erasure_sets; then
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    echo "Testing in Distributed Erasure expanded setup with ipv6"
    if ! run_test_zone_erasure_sets_ipv6; then
    if ! run_test_pool_erasure_sets_ipv6; then
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
@@ -29,44 +29,52 @@ MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function start_minio_3_node() {
    declare -a minio_pids
    declare -a ARGS
    export MINIO_ACCESS_KEY=minio
    export MINIO_SECRET_KEY=minio123
    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MINIO_ERASURE_SET_DRIVE_COUNT=6

    start_port=$(shuf -i 10000-65000 -n 1)
    args=""
    for i in $(seq 1 3); do
        ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
        args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
    done

    "${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
    minio_pids[0]=$!
    disown "${minio_pids[0]}"
    "${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
    disown $!

    "${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
    minio_pids[1]=$!
    disown "${minio_pids[1]}"
    "${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
    disown $!

    "${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
    minio_pids[2]=$!
    disown "${minio_pids[2]}"
    "${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
    disown $!

    sleep "$1"
    for pid in "${minio_pids[@]}"; do
        if ! kill "$pid"; then
            for i in $(seq 1 3); do
                echo "server$i log:"
                cat "${WORK_DIR}/dist-minio-server$i.log"
            done
            echo "FAILED"
            purge "$WORK_DIR"
            exit 1
        fi
    if [ "$(pgrep -c minio)" -ne 3 ]; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi
    if ! pkill minio; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    sleep 1;
    if pgrep minio; then
        # forcibly killing, to proceed further properly.
        kill -9 "$pid"
        sleep 1 # wait 1sec per pid
    done
    if ! pkill -9 minio; then
        echo "no minio process running anymore, proceed."
    fi
    fi
}

@@ -61,7 +61,7 @@ type accessControlPolicy struct {
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketACL")

    defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
@@ -125,7 +125,7 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketACL")

    defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
@@ -176,7 +176,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutObjectACL")

    defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
@@ -240,7 +240,7 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetObjectACL")

    defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

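Every AuditLog call in this changeset follows the same mechanical rewrite: the API name string moves out of the argument list and into the request context built by newContext. A runnable toy of the pattern, with illustrative names rather than MinIO's actual types:

package main

import (
    "context"
    "fmt"
    "net/http"
)

type reqInfoKey struct{}

// newContext mimics the pattern in the diff: the API name is attached to
// the request context once, so downstream helpers such as AuditLog no
// longer need it passed explicitly.
func newContext(r *http.Request, api string) context.Context {
    return context.WithValue(r.Context(), reqInfoKey{}, api)
}

func auditLog(ctx context.Context, r *http.Request) {
    api, _ := ctx.Value(reqInfoKey{}).(string)
    fmt.Printf("audit: api=%s path=%s\n", api, r.URL.Path)
}

func main() {
    r, _ := http.NewRequest(http.MethodPut, "/mybucket?acl", nil)
    ctx := newContext(r, "PutBucketACL")
    defer auditLog(ctx, r)
}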
289
cmd/admin-bucket-handlers.go
Normal file
@@ -0,0 +1,289 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "encoding/json"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/logger"
    iampolicy "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/madmin"
)

const (
    bucketQuotaConfigFile = "quota.json"
    bucketTargetsFile     = "bucket-targets.json"
)

// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketQuotaConfig")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    data, err := ioutil.ReadAll(r.Body)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
        return
    }

    if _, err = parseBucketQuota(bucket, data); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    // Write success response.
    writeSuccessResponseHeadersOnly(w)
}

// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketQuotaConfig")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    configData, err := json.Marshal(config)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    // Write success response.
    writeSuccessResponseJSON(w, configData)
}

// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetBucketTarget")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    update := r.URL.Query().Get("update") == "true"

    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }

    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    cred, _, _, s3Err := validateAdminSignature(ctx, r, "")
    if s3Err != ErrNone {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
        return
    }
    password := cred.SecretKey

    reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    var target madmin.BucketTarget
    if err = json.Unmarshal(reqBytes, &target); err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
    if sameTarget && bucket == target.TargetBucket {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
        return
    }

    target.SourceBucket = bucket
    if !update {
        target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
    }
    if target.Arn == "" {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }

    if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    tgtBytes, err := json.Marshal(&targets)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    data, err := json.Marshal(target.Arn)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}

// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListBucketTargets")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arnType := vars["type"]
    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }
    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }
    if bucket != "" {
        // Check if bucket exists.
        if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
            return
        }
        if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
    }
    targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
    data, err := json.Marshal(targets)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}

// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RemoveBucketTarget")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arn := vars["arn"]

    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }
    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    tgtBytes, err := json.Marshal(&targets)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    // Write success response.
    writeSuccessNoContent(w)
}
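Two details of this new file are worth pulling out. First, the quota document stored as quota.json is a small JSON object; the field names below are assumptions about the madmin.BucketTarget/BucketQuota shape, not taken from its source. Second, the identical-to-source guard in SetRemoteTargetHandler rejects a target only when it resolves to the same host AND names the same bucket. A hedged, runnable sketch of both:

package main

import (
    "encoding/json"
    "fmt"
)

// bucketQuota approximates the payload PutBucketQuotaConfigHandler parses;
// the real field names live in pkg/madmin and are assumed here.
type bucketQuota struct {
    Quota uint64 `json:"quota"`     // limit in bytes
    Type  string `json:"quotatype"` // e.g. "hard" or "fifo" (assumed values)
}

// identicalToSource reproduces the guard in SetRemoteTargetHandler: a
// remote target is rejected only when it is both the same server and the
// same bucket, so same-host bucket-to-bucket replication stays allowed.
func identicalToSource(sameHost bool, srcBucket, dstBucket string) bool {
    return sameHost && srcBucket == dstBucket
}

func main() {
    data, _ := json.Marshal(bucketQuota{Quota: 10 << 30, Type: "hard"}) // 10 GiB hard quota
    fmt.Println(string(data))
    fmt.Println(identicalToSource(true, "photos", "photos")) // true: rejected
    fmt.Println(identicalToSource(true, "photos", "backup")) // false: allowed
}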
@@ -42,14 +42,14 @@ import (

func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
    // Get current object layer instance.
    objectAPI := newObjectLayerWithoutSafeModeFn()
    objectAPI := newObjectLayerFn()
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return auth.Credentials{}, nil
    }

    // Validate request signature.
    cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
    cred, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
    if adminAPIErr != ErrNone {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
        return cred, nil
@@ -62,7 +62,7 @@ func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *htt
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DeleteConfigKV")

    defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -104,7 +104,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetConfigKV")

    defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -131,12 +131,13 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
        return
    }

    if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
    dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    if err = validateConfig(cfg); err != nil {
    if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }
@@ -158,6 +159,16 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
        saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
    }

    if dynamic {
        // Apply dynamic values.
        if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
        globalNotificationSys.SignalService(serviceReloadDynamic)
        // If all values were dynamic, tell the client.
        w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
    }
    writeSuccessResponseHeadersOnly(w)
}

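One consequence of the new dynamic branch: a client can tell purely from the response whether its changes took effect without a server restart. A small sketch of the client side; the literal header values stand in for madmin.ConfigAppliedHeader and madmin.ConfigAppliedTrue and are assumptions here:

package main

import (
    "fmt"
    "net/http"
)

// configApplied mimics what an admin client could do with the header set
// by SetConfigKVHandler: if every submitted key was dynamic, the server
// answers with the applied marker and no restart is required.
func configApplied(h http.Header) bool {
    // Assumed literals; the real constants live in pkg/madmin.
    return h.Get("x-minio-config-applied") == "true"
}

func main() {
    h := http.Header{}
    h.Set("x-minio-config-applied", "true")
    fmt.Println("restart needed:", !configApplied(h))
}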
@@ -165,7 +176,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetConfigKV")

    defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -203,7 +214,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ClearConfigHistoryKV")

    defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -240,7 +251,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RestoreConfigHistoryKV")

    defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -266,12 +277,12 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
        return
    }

    if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
    if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    if err = validateConfig(cfg); err != nil {
    if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }
@@ -288,7 +299,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListConfigHistoryKV")

    defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -328,7 +339,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "HelpConfigKV")

    defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -356,7 +367,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetConfig")

    defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -378,12 +389,12 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
    }

    cfg := newServerConfig()
    if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
    if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    if err = validateConfig(cfg); err != nil {
    if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }
@@ -413,7 +424,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetConfig")

    defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -22,8 +22,11 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"path"
|
||||
"sort"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config/dns"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
@@ -35,14 +38,14 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
|
||||
var adminAPIErr APIErrorCode
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return nil, cred
|
||||
}
|
||||
|
||||
// Validate request signature.
|
||||
cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
|
||||
cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return nil, cred
|
||||
@@ -55,7 +58,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")

defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
@@ -65,7 +68,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
accessKey := vars["accessKey"]

ok, err := globalIAMSys.IsTempUser(accessKey)
ok, _, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -93,7 +96,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")

defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
@@ -127,16 +130,43 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")

defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
if objectAPI == nil {
return
}
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
name := vars["accessKey"]

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

accessKey := cred.AccessKey
if cred.ParentUser != "" {
accessKey = cred.ParentUser
}

implicitPerm := name == accessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}

userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
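The GetUserInfo hunk above introduces an implicit-permission rule: a credential may always look up its own account (with temporary and service-account credentials resolving to their parent user), while anything else requires GetUserAdminAction. Condensed into a single helper, as a sketch that assumes the cmd package globals shown in the diff:

func mayGetUserInfo(r *http.Request, name string, cred auth.Credentials, claims map[string]interface{}, owner bool) bool {
	accessKey := cred.AccessKey
	if cred.ParentUser != "" {
		accessKey = cred.ParentUser // STS/service accounts act as their parent user
	}
	if name == accessKey {
		return true // implicit permission on one's own account
	}
	return globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     accessKey,
		Action:          iampolicy.GetUserAdminAction,
		ConditionValues: getConditionValues(r, "", accessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
	})
}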
@@ -156,7 +186,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")

defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
@@ -201,7 +231,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")

defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
@@ -230,7 +260,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")

defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
@@ -256,7 +286,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")

defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
@@ -293,7 +323,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")

defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
@@ -304,7 +334,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
accessKey := vars["accessKey"]
status := vars["status"]

// Custom IAM policies not allowed for admin user.
// This API is not allowed to lookup accessKey user status
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -328,22 +358,55 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")

defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
if objectAPI == nil {
vars := mux.Vars(r)
accessKey := path.Clean(vars["accessKey"])

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

vars := mux.Vars(r)
accessKey := vars["accessKey"]
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

// Custom IAM policies not allowed for admin user.
if accessKey == globalActiveCred.AccessKey {
// Not allowed to add a user with same access key as root credential
if owner && accessKey == cred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}

if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
// Incoming access key matches parent user then we should
// reject password change requests.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}

implicitPerm := accessKey == cred.AccessKey
if !implicitPerm {
parentUser := cred.ParentUser
if parentUser == "" {
parentUser = cred.AccessKey
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -365,7 +428,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}

if err = globalIAMSys.SetUser(accessKey, uinfo); err != nil {
if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
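The AddUser hunks above fold two new rejection rules into the handler before the IAM permission check. As a sketch, assuming the credential and error-code types of the surrounding cmd package, the preconditions now amount to:

func addUserPrechecks(accessKey string, cred auth.Credentials, owner bool) APIErrorCode {
	// The root credential cannot be re-added as a regular user.
	if owner && accessKey == cred.AccessKey {
		return ErrAddUserInvalidArgument
	}
	// A temporary or service-account credential must not use AddUser to
	// change the password of its own parent user.
	if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
		return ErrAddUserInvalidArgument
	}
	return ErrNone
}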
@@ -383,11 +446,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")

defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -398,6 +461,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}

// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}

password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
@@ -411,18 +480,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}

// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}

parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}

newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -462,11 +525,11 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")

defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -517,11 +580,11 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")

defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -572,15 +635,15 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
writeSuccessNoContent(w)
}

// AccountUsageInfoHandler returns usage
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountUsageInfo")
// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")

defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -627,12 +690,6 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
return rd, wr
}

buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
@@ -640,13 +697,44 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
logger.LogIf(ctx, err)
}

accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
// If etcd, dns federation configured list buckets from etcd.
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for _, dnsRecords := range dnsBuckets {
buckets = append(buckets, BucketInfo{
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

acctInfo := madmin.AccountUsageInfo{
accountName := cred.AccessKey
policies, err := globalIAMSys.PolicyDBGet(accountName, false)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

acctInfo := madmin.AccountInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}

for _, bucket := range buckets {
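Read together, the hunks above change how the renamed AccountInfoHandler assembles its bucket list: under etcd/DNS bucket federation the list comes from the DNS store, otherwise from the object layer. Condensed into one helper, as a sketch over the package types visible in this diff:

func listBucketsForAccountInfo(ctx context.Context, objectAPI ObjectLayer) ([]BucketInfo, error) {
	if globalDNSConfig != nil && globalBucketFederation {
		dnsBuckets, err := globalDNSConfig.List()
		if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrDomainMissing) {
			return nil, err
		}
		var buckets []BucketInfo
		for _, dnsRecords := range dnsBuckets {
			buckets = append(buckets, BucketInfo{
				Name:    dnsRecords[0].Key,
				Created: dnsRecords[0].CreationDate,
			})
		}
		// DNS entries carry no ordering, so sort by name for stable output.
		sort.Slice(buckets, func(i, j int) bool { return buckets[i].Name < buckets[j].Name })
		return buckets, nil
	}
	return objectAPI.ListBuckets(ctx)
}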
@@ -682,7 +770,7 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")

defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
@@ -709,7 +797,7 @@ func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")

defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
@@ -722,7 +810,10 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
return
}

json.NewEncoder(w).Encode(policy)
if err = json.NewEncoder(w).Encode(policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}

@@ -730,7 +821,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")

defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -764,7 +855,7 @@ func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Re
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")

defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -798,7 +889,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")

defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
@@ -826,7 +917,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")

defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
@@ -878,7 +969,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")

defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
@@ -891,7 +982,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
isGroup := vars["isGroup"] == "true"

if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
ok, _, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
File diff suppressed because it is too large
@@ -66,7 +66,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
// Initialize boot time
globalBootTime = UTCNow()

globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
globalEndpoints = mustGetPoolEndpoints(erasureDirs...)

newAllSubsystems()

@@ -97,16 +97,9 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(erasureDirs...)
storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(erasureDirs)
return nil, nil, err
}

endpoints := mustGetPoolEndpoints(erasureDirs...)
globalPolicySys = NewPolicySys()
objLayer := &erasureZones{zones: make([]*erasureSets, 1)}
objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
objLayer, err := newErasureServerPools(ctx, endpoints)
if err != nil {
return nil, nil, err
}
@@ -327,13 +320,13 @@ func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(string(mgmtClientToken), clientToken)
v.Add(mgmtClientToken, clientToken)
}
if forceStart {
v.Add(string(mgmtForceStart), "")
v.Add(mgmtForceStart, "")
}
if forceStop {
v.Add(string(mgmtForceStop), "")
v.Add(mgmtForceStop, "")
}
return v
}
@@ -351,11 +344,11 @@ func TestExtractHealInitParams(t *testing.T) {
}
varsArr := []map[string]string{
// Invalid cases
{string(mgmtPrefix): "objprefix"},
{mgmtPrefix: "objprefix"},
// Valid cases
{},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
}

// Body is always valid - we do not test JSON decoding.
@@ -364,7 +357,7 @@ func TestExtractHealInitParams(t *testing.T) {
// Test all combinations!
for pIdx, parms := range qParmsArr {
for vIdx, vars := range varsArr {
_, err := extractHealInitParams(vars, parms, bytes.NewBuffer([]byte(body)))
_, err := extractHealInitParams(vars, parms, bytes.NewReader([]byte(body)))
isErrCase := false
if pIdx < 4 || vIdx < 1 {
isErrCase = true

@@ -21,6 +21,8 @@ import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"

@@ -63,7 +65,7 @@ var (
errHealStopSignalled = fmt.Errorf("heal stop signaled")

errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAPIError(ctx, err)
apiErr := toAdminAPIErr(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
@@ -85,29 +87,71 @@ type healSequenceStatus struct {

// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.Mutex
sync.RWMutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence
healLocalDisks map[Endpoint]struct{}
}

// newHealState - initialize global heal state management
func newHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
func newHealState(cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
}
return hstate
}

go healState.periodicHealSeqsClean(GlobalContext)
func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()

return healState
return len(ahs.healLocalDisks)
}

func (ahs *allHealState) getHealLocalDisks() Endpoints {
ahs.RLock()
defer ahs.RUnlock()

var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
}
return endpoints
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}
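The new healLocalDisks set above gives the server a registry of local drives waiting to be healed. A usage sketch, assuming the Endpoint/Endpoints types of the surrounding cmd package (the caller function is hypothetical, for illustration only): drives are pushed as they are detected, counted for status reporting, and popped once the background healer takes them over.

func trackReplacedDisks(ahs *allHealState, replaced ...Endpoint) {
	ahs.pushHealLocalDisks(replaced...) // register drives pending heal

	fmt.Println("drives pending heal:", ahs.healDriveCount())

	pending := ahs.getHealLocalDisks() // snapshot for the healer
	// ... schedule healing for the pending endpoints ...
	ahs.popHealLocalDisks(pending...) // drop them once healing is underway
}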
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
periodicTimer := time.NewTimer(time.Minute * 5)
defer periodicTimer.Stop()

for {
select {
case <-time.After(time.Minute * 5):
case <-periodicTimer.C:
periodicTimer.Reset(time.Minute * 5)
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
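The swap above replaces time.After, which allocates a fresh timer on every loop iteration, with a single timer that is reset after each tick. The general idiom, as a self-contained sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

func periodically(ctx context.Context, every time.Duration, work func()) {
	t := time.NewTimer(every)
	defer t.Stop() // release the timer when the loop exits

	for {
		select {
		case <-t.C:
			work()
			t.Reset(every) // reuse the same timer instead of allocating a new one
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
	defer cancel()
	periodically(ctx, time.Second, func() { fmt.Println("tick") })
}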
@@ -151,7 +195,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
ClientToken: "unknown",
StartTime: UTCNow(),
}
} else {
@@ -190,24 +234,17 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {

existsAndLive := false
he, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists {
existsAndLive = !he.hasEnded()
}

if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
} else {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
return respBytes, apiErr, ""
}
} else {
oh, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists && !oh.hasEnded() {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
@@ -224,7 +261,6 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
hpath := pathJoin(h.bucket, h.object)
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {

errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
@@ -235,7 +271,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
ahs.healSeqMap[hpath] = h

// Launch top-level background heal go-routine
go h.healSequenceStart()
go h.healSequenceStart(objAPI)

clientToken := h.clientToken
if globalIsDistErasure {
@@ -249,7 +285,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
})
if err != nil {
logger.LogIf(h.ctx, err)
return nil, toAPIError(h.ctx, err), ""
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
}
@@ -264,8 +300,11 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
if !exists {
// If there is no such heal sequence, return error.
return nil, ErrHealNoSuchProcess
// heal sequence doesn't exist, must have finished.
jbytes, err := json.Marshal(healSequenceStatus{
Summary: healFinishedStatus,
})
return jbytes, toAdminAPIErrCode(GlobalContext, err)
}

// Check if client-token is valid
@@ -490,9 +529,12 @@ func (h *healSequence) isQuitting() bool {
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
h.mutex.RUnlock()
return ended
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
}

// stops the heal sequence - safe to call multiple times.
@@ -573,7 +615,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
// Set status as running
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
@@ -581,7 +623,7 @@ func (h *healSequence) healSequenceStart() {
h.mutex.Unlock()

if h.sourceCh == nil {
go h.traverseAndHeal()
go h.traverseAndHeal(objAPI)
} else {
go h.healFromSourceCh()
}
@@ -606,8 +648,7 @@ func (h *healSequence) healSequenceStart() {
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
h.currentStatus.Summary = healFinishedStatus
h.mutex.Unlock()

// drain traverse channel so the traversal
@@ -621,7 +662,18 @@ func (h *healSequence) healSequenceStart() {
}
}

func (h *healSequence) logHeal(healType madmin.HealItemType) {
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
}

func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
globalHealConfigMu.Unlock()

// Send heal request
task := healTask{
bucket: source.bucket,
@@ -633,6 +685,18 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if source.opts != nil {
task.opts = *source.opts
}
if opts.Bitrot {
task.opts.ScanMode = madmin.HealDeepScan
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(opts.IOCount, opts.Sleep)

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)

select {
@@ -640,9 +704,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if !h.reportProgress {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and
// return success.
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
// Return the error so that caller can handle it.
return res.err
}

h.mutex.Lock()
@@ -691,24 +757,31 @@ func (h *healSequence) healItemsFromSourceCh() error {
if !ok {
return nil
}

var itemType madmin.HealItemType
switch {
case source.bucket == nopHeal:
switch source.bucket {
case nopHeal:
continue
case source.bucket == SlashSeparator:
case SlashSeparator:
itemType = madmin.HealItemMetadata
case source.bucket != "" && source.object == "":
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
if source.object == "" {
itemType = madmin.HealItemBucket
} else {
itemType = madmin.HealItemObject
}
}

if err := h.queueHealTask(source, itemType); err != nil {
logger.LogIf(h.ctx, err)
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}

h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
@@ -719,28 +792,28 @@ func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
}

func (h *healSequence) healDiskMeta() error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Try to pro-actively heal backend-encrypted file.
if err := h.queueHealTask(healSource{
bucket: minioMetaBucket,
object: backendEncryptedFile,
}, madmin.HealItemBucketMetadata); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

// Start healing the config prefix.
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
return err
}

// Start healing the bucket config prefix.
return h.healMinioSysMeta(bucketConfigPrefix)()
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}

func (h *healSequence) healItems(bucketsOnly bool) error {
if err := h.healDiskMeta(); err != nil {
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
if err := h.healDiskMeta(objAPI); err != nil {
return err
}

// Heal buckets and objects
return h.healBuckets(bucketsOnly)
return h.healBuckets(objAPI, bucketsOnly)
}

// traverseAndHeal - traverses on-disk data and performs healing
@@ -750,41 +823,40 @@ func (h *healSequence) healItems(bucketsOnly bool) error {
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
close(h.traverseAndHealDoneCh)
}

// healMinioSysMeta - heals all files under a given meta prefix, returns a function
// which in-turn heals the respective meta directory path and any files in int.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always upto date and correct.
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
if h.isQuitting() {
return errHealStopSignalled
}

herr := h.queueHealTask(healSource{
// Skip metacache entries healing
if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
return nil
}

err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if isErrObjectNotFound(herr) || isErrVersionNotFound(herr) {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return herr
return err
})
}
}
@@ -796,39 +868,32 @@ func (h *healSequence) healDiskFormat() error {
return errHealStopSignalled
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(bucketsOnly bool) error {
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}

// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket, bucketsOnly)
return h.healBucket(objAPI, h.bucket, bucketsOnly)
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

buckets, err := objectAPI.ListBucketsHeal(h.ctx)
buckets, err := objAPI.ListBuckets(h.ctx)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Created.After(buckets[j].Created)
})

for _, bucket := range buckets {
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
return err
}
}
@@ -837,15 +902,11 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
}

// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
return err
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

if bucketsOnly {
@@ -856,9 +917,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, ""); err != nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -867,27 +931,26 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
return nil
}

if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}

// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

if h.isQuitting() {
return errHealStopSignalled
}

return h.queueHealTask(healSource{
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
}

@@ -1,118 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"encoding/json"
"io/ioutil"
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)

const (
bucketQuotaConfigFile = "quota.json"
)

// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")

defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}

if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")

defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Write success response.
writeSuccessResponseJSON(w, configData)
}
@@ -20,8 +20,6 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

@@ -112,11 +110,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- IAM APIs --

// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")

// Add user IAM

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")

@@ -171,22 +168,32 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
}

// Quota operations
if globalIsDistErasure || globalIsErasure {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}

// -- Top APIs --
// Top locks
if globalIsDistErasure {
// Top locks
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
// Force unlocks paths
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
Queries("paths", "{paths:.*}").HandlerFunc(httpTraceHdrs(adminAPI.ForceUnlockHandler))
}

// HTTP Trace
@@ -201,23 +208,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))

if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
}
}

// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
}

@@ -17,23 +17,24 @@
package cmd

import (
"context"
"net/http"
"os"
"time"

"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)

// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var disks []madmin.Disk
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
var localEndpoints Endpoints
addr := r.Host
if globalIsDistErasure {
addr = GetLocalPeer(endpointZones)
addr = GetLocalPeer(endpointServerPools)
}
network := make(map[string]string)
for _, ep := range endpointZones {
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
@@ -42,38 +43,27 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
network[nodeName] = "online"
} else {
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
}
disks = append(disks, di)
} else {
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
network[nodeName] = "offline"
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
}
}
}
}

localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)

storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())

return madmin.ServerProperties{
State: "ok",
Endpoint: addr,
@@ -81,6 +71,23 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
Version: Version,
CommitID: CommitID,
Network: network,
Disks: disks,
Disks: storageInfo.Disks,
}
}

func getLocalDisks(endpointServerPools EndpointServerPools) []madmin.Disk {
var localEndpoints Endpoints
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
localEndpoints = append(localEndpoints, endpoint)
}
}

localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return storageInfo.Disks
}
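A small sketch of how the new getLocalDisks helper above can be consumed, assuming the cmd package's globalEndpoints registry; the fields printed are the madmin.Disk fields visible in this diff:

func printLocalDiskUsage() {
	// Per-disk data now comes from the storage layer rather than raw disk.GetInfo.
	for _, d := range getLocalDisks(globalEndpoints) {
		fmt.Printf("%s: total=%d bytes, used=%d bytes, state=%s\n",
			d.DrivePath, d.TotalSpace, d.UsedSpace, d.State)
	}
}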
@@ -18,6 +18,7 @@ package cmd

import (
"encoding/xml"
"time"
)

// DeletedObject objects deleted
@@ -26,12 +27,44 @@ type DeletedObject struct {
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
ObjectName string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`

// MinIO extensions to support delete marker replication
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
// MTime of DeleteMarker on source that needs to be propagated to replica
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
}

// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
time.Time
}

// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}
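A quick demonstration of the custom marshaller added above, as a self-contained sketch (the types are reduced to the fields needed to show the behavior): a zero DeleteMarkerMTime produces no element at all, while a set one is encoded as RFC 3339.

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

type DeleteMarkerMTime struct{ time.Time }

func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if t.Time.IsZero() {
		return nil // omit the element entirely for the zero value
	}
	return e.EncodeElement(t.Time.Format(time.RFC3339), start)
}

type deleted struct {
	Key               string            `xml:"Key"`
	DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
}

func main() {
	zero, _ := xml.Marshal(deleted{Key: "a.txt"})
	set, _ := xml.Marshal(deleted{
		Key:               "a.txt",
		DeleteMarkerMTime: DeleteMarkerMTime{time.Date(2020, 11, 12, 0, 0, 0, 0, time.UTC)},
	})
	fmt.Println(string(zero)) // <deleted><Key>a.txt</Key></deleted>
	fmt.Println(string(set))  // includes <DeleteMarkerMTime>2020-11-12T00:00:00Z</DeleteMarkerMTime>
}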
// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
// Version ID of delete marker
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned"`
}

// createBucketConfiguration container for bucket configuration request from client.

@@ -27,13 +27,15 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"google.golang.org/api/googleapi"

minio "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/replication"

objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
@@ -85,6 +87,7 @@ const (
ErrInvalidMaxUploads
ErrInvalidMaxParts
ErrInvalidPartNumberMarker
ErrInvalidPartNumber
ErrInvalidRequestBody
ErrInvalidCopySource
ErrInvalidMetadataDirective
@@ -104,8 +107,24 @@ const (
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrRemoteDestinationNotFoundError
ErrReplicationDestinationMissingLock
ErrRemoteTargetNotFoundError
ErrReplicationRemoteConnectionError
ErrBucketRemoteIdenticalToSource
ErrBucketRemoteAlreadyExists
ErrBucketRemoteLabelInUse
ErrBucketRemoteArnTypeInvalid
ErrBucketRemoteArnInvalid
ErrBucketRemoteRemoveDisallowed
ErrRemoteTargetNotVersionedError
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrObjectRestoreAlreadyInProgress
ErrNoSuchKey
ErrNoSuchUpload
ErrInvalidVersionID
ErrNoSuchVersion
ErrNotImplemented
ErrPreconditionFailed
@@ -216,6 +235,7 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrClientDisconnected
ErrOperationMaxedOut
ErrInvalidRequest
// MinIO storage class error codes
@@ -243,7 +263,6 @@ const (
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled

ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -344,6 +363,7 @@ const (
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)
@@ -418,6 +438,11 @@ var errorCodes = errorCodeMap{
Description: "Argument partNumberMarker must be an integer.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartNumber: {
Code: "InvalidPartNumber",
Description: "The requested partnumber is not satisfiable",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrInvalidPolicyDocument: {
Code: "InvalidPolicyDocument",
Description: "The content of the form does not meet the conditions specified in the policy document.",
@@ -538,11 +563,16 @@ var errorCodes = errorCodeMap{
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchVersion: {
ErrInvalidVersionID: {
Code: "InvalidArgument",
Description: "Invalid version id specified",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchVersion: {
Code: "NoSuchVersion",
Description: "The specified version does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNotImplemented: {
Code: "NotImplemented",
Description: "A header you provided implies functionality that is not implemented",
HTTPStatusCode: http.StatusBadRequest,
@@ -648,20 +678,9 @@ var errorCodes = errorCodeMap{
Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
|
||||
// Need changes to make sure variable messages can be constructed.
|
||||
ErrMalformedCredentialDate: {
|
||||
Code: "AuthorizationQueryParametersError",
|
||||
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
|
||||
// right Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
|
||||
// Need changes to make sure variable messages can be constructed.
|
||||
ErrMalformedCredentialRegion: {
|
||||
Code: "AuthorizationQueryParametersError",
|
||||
Description: "Error parsing the X-Amz-Credential parameter; the region is wrong;",
|
||||
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidRegion: {
|
||||
@@ -669,9 +688,6 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Region does not match.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
|
||||
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
|
||||
// Need changes to make sure variable messages can be constructed.
|
||||
ErrInvalidServiceS3: {
|
||||
Code: "AuthorizationParametersError",
|
||||
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
|
||||
@@ -682,9 +698,6 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
|
||||
// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
|
||||
// Need changes to make sure variable messages can be constructed.
|
||||
ErrInvalidRequestVersion: {
|
||||
Code: "AuthorizationQueryParametersError",
|
||||
Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",
|
||||
@@ -755,8 +768,6 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Your key is too long",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
|
||||
// FIXME: Actual XML error response also contains the header that is missing from the list of signed header parameters.
|
||||
ErrUnsignedHeaders: {
|
||||
Code: "AccessDenied",
|
||||
Description: "There were headers present in the request which were not signed",
|
||||
@@ -812,6 +823,76 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The replication configuration was not found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrRemoteDestinationNotFoundError: {
|
||||
Code: "RemoteDestinationNotFoundError",
|
||||
Description: "The remote destination bucket does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrReplicationDestinationMissingLock: {
|
||||
Code: "ReplicationDestinationMissingLockError",
|
||||
Description: "The replication destination bucket does not have object locking enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrRemoteTargetNotFoundError: {
|
||||
Code: "XMinioAdminRemoteTargetNotFoundError",
|
||||
Description: "The remote target does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrReplicationRemoteConnectionError: {
|
||||
Code: "XMinioAdminReplicationRemoteConnectionError",
|
||||
Description: "Remote service connection error - please check remote service credentials and target bucket",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrBucketRemoteIdenticalToSource: {
|
||||
Code: "XMinioAdminRemoteIdenticalToSource",
|
||||
Description: "The remote target cannot be identical to source",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteAlreadyExists: {
|
||||
Code: "XMinioAdminBucketRemoteAlreadyExists",
|
||||
Description: "The remote target already exists",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteLabelInUse: {
|
||||
Code: "XMinioAdminBucketRemoteLabelInUse",
|
||||
Description: "The remote target with this label already exists",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteRemoveDisallowed: {
|
||||
Code: "XMinioAdminRemoteRemoveDisallowed",
|
||||
Description: "This ARN is in use by an existing configuration",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteArnTypeInvalid: {
|
||||
Code: "XMinioAdminRemoteARNTypeInvalid",
|
||||
Description: "The bucket remote ARN type is not valid",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteArnInvalid: {
|
||||
Code: "XMinioAdminRemoteArnInvalid",
|
||||
Description: "The bucket remote ARN does not have correct format",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrRemoteTargetNotVersionedError: {
|
||||
Code: "RemoteTargetNotVersionedError",
|
||||
Description: "The remote target does not have versioning enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrReplicationSourceNotVersionedError: {
|
||||
Code: "ReplicationSourceNotVersionedError",
|
||||
Description: "The replication source does not have versioning enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrReplicationNeedsVersioningError: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrReplicationBucketNeedsVersioningError: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "Versioning must be 'Enabled' on the bucket to add a replication target",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrNoSuchObjectLockConfiguration: {
|
||||
Code: "NoSuchObjectLockConfiguration",
|
||||
Description: "The specified object does not have a ObjectLock configuration",
|
||||
@@ -842,6 +923,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrObjectRestoreAlreadyInProgress: {
|
||||
Code: "RestoreAlreadyInProgress",
|
||||
Description: "Object restore is already in progress",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
/// Bucket notification related errors.
|
||||
ErrEventNotification: {
|
||||
Code: "InvalidArgument",
|
||||
@@ -904,7 +990,7 @@ var errorCodes = errorCodeMap{
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMetadataTooLarge: {
|
||||
Code: "InvalidArgument",
|
||||
Code: "MetadataTooLarge",
|
||||
Description: "Your metadata headers exceed the maximum allowed metadata size.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
@@ -1128,11 +1214,6 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The quota configuration does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrAdminBucketQuotaDisabled: {
|
||||
Code: "XMinioAdminBucketQuotaDisabled",
|
||||
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInsecureClientRequest: {
|
||||
Code: "XMinioInsecureClientRequest",
|
||||
Description: "Cannot respond to plain-text request from TLS-encrypted server",
|
||||
@@ -1143,6 +1224,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrClientDisconnected: {
|
||||
Code: "ClientDisconnected",
|
||||
Description: "Client disconnected before response was ready",
|
||||
HTTPStatusCode: 499, // No official code, use nginx value.
|
||||
},
|
||||
ErrOperationMaxedOut: {
|
||||
Code: "SlowDown",
|
||||
Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
|
||||
@@ -1641,12 +1727,17 @@ var errorCodes = errorCodeMap{
|
||||
ErrAddUserInvalidArgument: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "User is not allowed to be same as admin access key",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrAdminAccountNotEligible: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "The administrator key is not eligible for this operation",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrAccountNotEligible: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "The account key is not eligible for this operation",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrServiceAccountNotFound: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
@@ -1669,6 +1760,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Only return ErrClientDisconnected if the provided context is actually canceled.
|
||||
// This way downstream context.Canceled will still report ErrOperationTimedOut
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if ctx.Err() == context.Canceled {
|
||||
return ErrClientDisconnected
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
switch err {
|
||||
case errInvalidArgument:
|
||||
apiErr = ErrAdminInvalidArgument
|
||||
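A standalone sketch (function name is hypothetical) of the non-blocking select above: report a client disconnect only when the request context has actually been canceled, and fall through to normal error mapping otherwise.

package main

import (
	"context"
	"fmt"
)

func classify(ctx context.Context) string {
	select {
	case <-ctx.Done():
		if ctx.Err() == context.Canceled {
			return "client disconnected"
		}
		return "deadline exceeded" // context.DeadlineExceeded keeps its own mapping
	default:
		// Context still live; downstream context.Canceled errors can still
		// be reported as an operation timeout.
		return "other error"
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(classify(ctx)) // other error
	cancel()
	fmt.Println(classify(ctx)) // client disconnected
}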
@@ -1785,6 +1886,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrNoSuchKey
|
||||
case MethodNotAllowed:
|
||||
apiErr = ErrMethodNotAllowed
|
||||
case InvalidVersionID:
|
||||
apiErr = ErrInvalidVersionID
|
||||
case VersionNotFound:
|
||||
apiErr = ErrNoSuchVersion
|
||||
case ObjectAlreadyExists:
|
||||
@@ -1837,6 +1940,30 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrObjectLockConfigurationNotFound
|
||||
case BucketQuotaConfigNotFound:
|
||||
apiErr = ErrAdminNoSuchQuotaConfiguration
|
||||
case BucketReplicationConfigNotFound:
|
||||
apiErr = ErrReplicationConfigurationNotFoundError
|
||||
case BucketRemoteDestinationNotFound:
|
||||
apiErr = ErrRemoteDestinationNotFoundError
|
||||
case BucketReplicationDestinationMissingLock:
|
||||
apiErr = ErrReplicationDestinationMissingLock
|
||||
case BucketRemoteTargetNotFound:
|
||||
apiErr = ErrRemoteTargetNotFoundError
|
||||
case BucketRemoteConnectionErr:
|
||||
apiErr = ErrReplicationRemoteConnectionError
|
||||
case BucketRemoteAlreadyExists:
|
||||
apiErr = ErrBucketRemoteAlreadyExists
|
||||
case BucketRemoteLabelInUse:
|
||||
apiErr = ErrBucketRemoteLabelInUse
|
||||
case BucketRemoteArnTypeInvalid:
|
||||
apiErr = ErrBucketRemoteArnTypeInvalid
|
||||
case BucketRemoteArnInvalid:
|
||||
apiErr = ErrBucketRemoteArnInvalid
|
||||
case BucketRemoteRemoveDisallowed:
|
||||
apiErr = ErrBucketRemoteRemoveDisallowed
|
||||
case BucketRemoteTargetNotVersioned:
|
||||
apiErr = ErrRemoteTargetNotVersionedError
|
||||
case BucketReplicationSourceNotVersioned:
|
||||
apiErr = ErrReplicationSourceNotVersionedError
|
||||
case BucketQuotaExceeded:
|
||||
apiErr = ErrAdminBucketQuotaExceeded
|
||||
case *event.ErrInvalidEventName:
|
||||
@@ -1867,6 +1994,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrBackendDown
|
||||
case ObjectNameTooLong:
|
||||
apiErr = ErrKeyTooLongError
|
||||
case dns.ErrInvalidBucketName:
|
||||
apiErr = ErrInvalidBucketName
|
||||
default:
|
||||
var ie, iw int
|
||||
// This work-around is to handle the issue golang/go#30648
|
||||
@@ -1903,6 +2032,12 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
}
|
||||
|
||||
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
|
||||
e, ok := err.(dns.ErrInvalidBucketName)
|
||||
if ok {
|
||||
code := toAPIErrorCode(ctx, e)
|
||||
apiErr = errorCodes.ToAPIErrWithErr(code, e)
|
||||
}
|
||||
|
||||
if apiErr.Code == "InternalError" {
|
||||
// If we see an internal error try to interpret
|
||||
// any underlying errors if possible depending on
|
||||
@@ -1941,6 +2076,12 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case replication.Error:
|
||||
apiErr = APIError{
|
||||
Code: "MalformedXML",
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case tags.Error:
|
||||
apiErr = APIError{
|
||||
Code: e.Code(),
|
||||
@@ -1984,6 +2125,12 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
HTTPStatusCode: e.Response().StatusCode,
|
||||
}
|
||||
// Add more Gateway SDKs here if any arise in the future.
|
||||
default:
|
||||
apiErr = APIError{
|
||||
Code: apiErr.Code,
|
||||
Description: fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
|
||||
HTTPStatusCode: apiErr.HTTPStatusCode,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -38,9 +38,18 @@ func mustGetRequestID(t time.Time) string {
return fmt.Sprintf("%X", t.UnixNano())
}

// setEventStreamHeaders sets headers that allow proxies to deliver the
// response to the client without buffering
func setEventStreamHeaders(w http.ResponseWriter) {
w.Header().Set(xhttp.ContentType, "text/event-stream")
w.Header().Set(xhttp.CacheControl, "no-cache") // nginx to turn off buffering
w.Header().Set("X-Accel-Buffering", "no") // nginx to turn off buffering
}
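A hedged sketch of a handler that applies the same anti-buffering headers before streaming events; the endpoint and payload are invented for illustration, only the three headers come from the function above.

package main

import (
	"fmt"
	"net/http"
	"time"
)

func events(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache") // nginx: do not buffer
	w.Header().Set("X-Accel-Buffering", "no")   // nginx: do not buffer
	f, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "data: tick %d\n\n", i)
		f.Flush() // push each event immediately instead of waiting for the body to end
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/events", events)
	_ = http.ListenAndServe(":8080", nil)
}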
||||
|
||||
// Write http common headers
|
||||
func setCommonHeaders(w http.ResponseWriter) {
|
||||
w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
|
||||
// Set the "Server" http header.
|
||||
w.Header().Set(xhttp.ServerInfo, "MinIO")
|
||||
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalServerRegion; region != "" {
|
||||
@@ -77,7 +86,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
|
||||
}
|
||||
|
||||
// Write object header
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
|
||||
// set common headers
|
||||
setCommonHeaders(w)
|
||||
|
||||
@@ -108,10 +117,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
}
|
||||
|
||||
// Set tag count if object has tags
|
||||
tags, _ := url.ParseQuery(objInfo.UserTags)
|
||||
tagCount := len(tags)
|
||||
if tagCount > 0 {
|
||||
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
|
||||
if len(objInfo.UserTags) > 0 {
|
||||
tags, _ := url.ParseQuery(objInfo.UserTags)
|
||||
if len(tags) > 0 {
|
||||
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
|
||||
}
|
||||
}
|
||||
|
||||
// Set all other user defined metadata.
|
||||
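The tag-count change above skips url.ParseQuery entirely for untagged objects. A small sketch of the logic, assuming only that user tags are stored in URL-query form:

package main

import (
	"fmt"
	"net/url"
)

func tagCount(userTags string) int {
	if len(userTags) == 0 {
		return 0 // avoid parsing for the common untagged case
	}
	tags, err := url.ParseQuery(userTags)
	if err != nil {
		return 0
	}
	return len(tags)
}

func main() {
	fmt.Println(tagCount(""))                 // 0
	fmt.Println(tagCount("env=prod&team=s3")) // 2
}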
@@ -121,31 +131,43 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
// values to client.
|
||||
continue
|
||||
}
|
||||
|
||||
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
continue
|
||||
}
|
||||
|
||||
var isSet bool
|
||||
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
|
||||
if !strings.HasPrefix(k, userMetadataPrefix) {
|
||||
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
|
||||
continue
|
||||
}
|
||||
w.Header()[strings.ToLower(k)] = []string{v}
|
||||
isSet = true
|
||||
break
|
||||
}
|
||||
|
||||
if !isSet {
|
||||
w.Header().Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
var start, rangeLen int64
|
||||
totalObjectSize, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// for providing ranged content
|
||||
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
|
||||
// For providing ranged content
|
||||
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rs == nil && opts.PartNumber > 0 {
|
||||
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
|
||||
}
|
||||
|
||||
// Set content length.
|
||||
w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
|
||||
if rs != nil {
|
||||
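The hunk above makes the user-metadata prefix check case-insensitive. A standalone sketch (the prefix values are illustrative, not necessarily MinIO's list):

package main

import (
	"fmt"
	"strings"
)

var userMetadataKeyPrefixes = []string{"X-Amz-Meta-", "X-Minio-Meta-"}

// isUserMetadata lowercases both the header key and the known prefix before
// comparing, so keys survive whatever casing the transport applied.
func isUserMetadata(key string) bool {
	for _, p := range userMetadataKeyPrefixes {
		if strings.HasPrefix(strings.ToLower(key), strings.ToLower(p)) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isUserMetadata("x-amz-meta-owner")) // true despite lowercase
	fmt.Println(isUserMetadata("Content-Type"))     // false
}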
@@ -157,21 +179,27 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
if objInfo.VersionID != "" {
|
||||
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
|
||||
}
|
||||
|
||||
if objInfo.ReplicationStatus.String() != "" {
|
||||
w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
|
||||
}
|
||||
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
|
||||
ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
|
||||
Name: objInfo.Name,
|
||||
UserTags: objInfo.UserTags,
|
||||
VersionID: objInfo.VersionID,
|
||||
ModTime: objInfo.ModTime,
|
||||
IsLatest: objInfo.IsLatest,
|
||||
DeleteMarker: objInfo.DeleteMarker,
|
||||
Name: objInfo.Name,
|
||||
UserTags: objInfo.UserTags,
|
||||
VersionID: objInfo.VersionID,
|
||||
ModTime: objInfo.ModTime,
|
||||
IsLatest: objInfo.IsLatest,
|
||||
DeleteMarker: objInfo.DeleteMarker,
|
||||
SuccessorModTime: objInfo.SuccessorModTime,
|
||||
})
|
||||
if !expiryTime.IsZero() {
|
||||
w.Header()[xhttp.AmzExpiration] = []string{
|
||||
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
|
||||
}
|
||||
}
|
||||
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
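A quick sketch of composing the x-amz-expiration value used above; the rule ID and expiry time are hypothetical inputs.

package main

import (
	"fmt"
	"net/http"
	"time"
)

func amzExpiration(ruleID string, expiry time.Time) string {
	return fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiry.Format(http.TimeFormat), ruleID)
}

func main() {
	fmt.Println(amzExpiration("expire-logs", time.Date(2021, 1, 2, 0, 0, 0, 0, time.UTC)))
	// expiry-date="Sat, 02 Jan 2021 00:00:00 GMT", rule-id="expire-logs"
}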
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -34,11 +35,11 @@ import (
|
||||
|
||||
const (
|
||||
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
|
||||
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = 10000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
|
||||
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
|
||||
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
)
|
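maxObjectList is no longer a flat 10000 but is derived from the metacache block size, keeping roughly 10% headroom below a full block per listing page. A sketch of the arithmetic; the block-size value below is a stand-in for illustration, not the real constant:

package main

import "fmt"

const metacacheBlockSize = 5000 // hypothetical stand-in

const maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10)

func main() {
	fmt.Println(maxObjectList) // 4500 with the stand-in value
}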
||||
|
||||
// LocationResponse - format for location response.
|
||||
@@ -387,7 +388,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
}
|
||||
proto := handlers.GetSourceScheme(r)
|
||||
if proto == "" {
|
||||
proto = getURLScheme(globalIsSSL)
|
||||
proto = getURLScheme(globalIsTLS)
|
||||
}
|
||||
u := &url.URL{
|
||||
Host: r.Host,
|
||||
@@ -396,8 +397,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
}
|
||||
// If domain is set then we need to use bucket DNS style.
|
||||
for _, domain := range domains {
|
||||
if strings.Contains(r.Host, domain) {
|
||||
u.Host = bucket + "." + r.Host
|
||||
if strings.HasPrefix(r.Host, bucket+"."+domain) {
|
||||
u.Path = path.Join(SlashSeparator, object)
|
||||
break
|
||||
}
|
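The change above only rewrites the location when the request host actually begins with "<bucket>." followed by the configured domain, instead of matching the domain anywhere in the host. A self-contained sketch of the resulting logic (helper name is ours):

package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func objectLocation(host, domain, bucket, object string) string {
	u := &url.URL{Scheme: "https", Host: host, Path: path.Join("/", bucket, object)}
	if strings.HasPrefix(host, bucket+"."+domain) {
		u.Path = path.Join("/", object) // bucket already encoded in the host
	}
	return u.String()
}

func main() {
	fmt.Println(objectLocation("mybucket.mys3.bucket.org", "mys3.bucket.org", "mybucket", "test/1.txt"))
	// https://mybucket.mys3.bucket.org/test/1.txt
}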
||||
@@ -408,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
// generates ListBucketsResponse from array of BucketInfo which can be
|
||||
// serialized to match XML and JSON API spec output.
|
||||
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
var listbuckets []Bucket
|
||||
listbuckets := make([]Bucket, 0, len(buckets))
|
||||
var data = ListBucketsResponse{}
|
||||
var owner = Owner{}
|
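This and the following hunks replace `var xs []T` declarations with `make([]T, 0, len(src))`. A tiny sketch of why: preallocating capacity avoids repeated slice growth during append while keeping the length at zero.

package main

import "fmt"

func main() {
	src := []int{1, 2, 3, 4}
	out := make([]string, 0, len(src)) // single allocation up front
	for _, v := range src {
		out = append(out, fmt.Sprint(v))
	}
	fmt.Println(out, len(out), cap(out)) // [1 2 3 4] 4 4
}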
||||
|
||||
@@ -428,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
|
||||
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
|
||||
var versions []ObjectVersion
|
||||
var prefixes []CommonPrefix
|
||||
versions := make([]ObjectVersion, 0, len(resp.Objects))
|
||||
var owner = Owner{}
|
||||
var data = ListVersionsResponse{}
|
||||
|
||||
@@ -473,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
data.VersionIDMarker = versionIDMarker
|
||||
data.IsTruncated = resp.IsTruncated
|
||||
|
||||
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
@@ -484,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
var contents []Object
|
||||
var prefixes []CommonPrefix
|
||||
contents := make([]Object, 0, len(resp.Objects))
|
||||
var owner = Owner{}
|
||||
var data = ListObjectsResponse{}
|
||||
|
||||
@@ -517,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
data.Marker = s3EncodeName(marker, encodingType)
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
|
||||
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
|
||||
data.IsTruncated = resp.IsTruncated
|
||||
|
||||
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
@@ -531,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
|
||||
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
|
||||
var contents []Object
|
||||
var commonPrefixes []CommonPrefix
|
||||
contents := make([]Object, 0, len(objects))
|
||||
var owner = Owner{}
|
||||
var data = ListObjectsV2Response{}
|
||||
|
||||
@@ -565,6 +564,10 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
// values to client.
|
||||
continue
|
||||
}
|
||||
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
continue
|
||||
}
|
||||
content.UserMetadata[k] = v
|
||||
}
|
||||
}
|
||||
@@ -581,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
|
||||
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
|
||||
data.IsTruncated = isTruncated
|
||||
|
||||
commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
|
||||
for _, prefix := range prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
@@ -634,8 +639,16 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
|
||||
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
|
||||
listPartsResponse.UploadID = partsInfo.UploadID
|
||||
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
|
||||
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
|
||||
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
|
||||
|
||||
// Placeholder values; not meaningful
|
||||
listPartsResponse.Initiator = Initiator{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: globalMinioDefaultOwnerID,
|
||||
}
|
||||
listPartsResponse.Owner = Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: globalMinioDefaultOwnerID,
|
||||
}
|
||||
|
||||
listPartsResponse.MaxParts = partsInfo.MaxParts
|
||||
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
|
||||
@@ -698,10 +711,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
|
||||
}
|
||||
|
||||
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
|
||||
if newObjectLayerFn() == nil {
|
||||
// Server still in safe mode.
|
||||
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
|
||||
}
|
||||
setCommonHeaders(w)
|
||||
if mType != mimeNone {
|
||||
w.Header().Set(xhttp.ContentType, string(mType))
|
||||
@@ -760,14 +769,14 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
||||
// Set retry-after header to indicate user-agents to retry request after 120secs.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set(xhttp.RetryAfter, "120")
|
||||
case "InvalidRegion":
|
||||
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
|
||||
case "AuthorizationHeaderMalformed":
|
||||
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
|
||||
case "AccessDenied":
|
||||
// If the request is from a browser and the browser
// UI is enabled, we need to redirect.
|
||||
if browser && globalBrowserEnabled {
|
||||
if newObjectLayerFn() == nil {
|
||||
// server still in safe mode.
|
||||
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
|
||||
}
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
@@ -818,38 +827,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
}
|
||||
|
||||
// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated).
|
||||
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
|
||||
|
||||
switch err.Code {
|
||||
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
|
||||
// Set retry-after header to indicate user-agents to retry request after 120secs.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set(xhttp.RetryAfter, "120")
|
||||
case "AccessDenied":
|
||||
// If the request is from a browser and the browser
// UI is enabled, we need to redirect.
|
||||
if browser && globalBrowserEnabled {
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
errorResponse := APIErrorResponse{
|
||||
Code: err.Code,
|
||||
Message: errBody,
|
||||
Resource: reqURL.Path,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
RequestID: w.Header().Get(xhttp.AmzRequestID),
|
||||
HostID: globalDeploymentID,
|
||||
}
|
||||
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ func TestObjectLocation(t *testing.T) {
|
||||
// Server with virtual domain name.
|
||||
{
|
||||
request: &http.Request{
|
||||
Host: "mys3.bucket.org",
|
||||
Host: "mybucket.mys3.bucket.org",
|
||||
Header: map[string][]string{},
|
||||
},
|
||||
domains: []string{"mys3.bucket.org"},
|
||||
@@ -87,7 +87,7 @@ func TestObjectLocation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
request: &http.Request{
|
||||
Host: "mys3.bucket.org",
|
||||
Host: "mybucket.mys3.bucket.org",
|
||||
Header: map[string][]string{
|
||||
"X-Forwarded-Scheme": {httpsScheme},
|
||||
},
|
||||
@@ -98,11 +98,14 @@ func TestObjectLocation(t *testing.T) {
|
||||
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
|
||||
},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
|
||||
if testCase.expectedLocation != gotLocation {
|
||||
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
|
||||
if testCase.expectedLocation != gotLocation {
|
||||
t.Errorf("expected %s, got %s", testCase.expectedLocation, gotLocation)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
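The refactor above moves each case into t.Run. A minimal sketch of the pattern (run with `go test`); re-declaring the loop variable is required before Go 1.22 so each closure captures its own copy:

package main

import "testing"

func TestDouble(t *testing.T) {
	cases := []struct{ in, want int }{{1, 2}, {3, 6}}
	for _, tc := range cases {
		tc := tc // per-iteration copy for the closure
		t.Run("", func(t *testing.T) {
			if got := tc.in * 2; got != tc.want {
				t.Errorf("expected %d, got %d", tc.want, got)
			}
		})
	}
}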
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
@@ -26,74 +27,92 @@ import (
|
||||
)
|
||||
|
||||
func newHTTPServerFn() *xhttp.Server {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
globalObjLayerMutex.RLock()
|
||||
defer globalObjLayerMutex.RUnlock()
|
||||
return globalHTTPServer
|
||||
}
|
||||
|
||||
func newObjectLayerWithoutSafeModeFn() ObjectLayer {
|
||||
func setHTTPServer(h *xhttp.Server) {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
return globalObjectAPI
|
||||
globalHTTPServer = h
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
||||
|
||||
func newObjectLayerFn() ObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
if globalSafeMode {
|
||||
return nil
|
||||
}
|
||||
globalObjLayerMutex.RLock()
|
||||
defer globalObjLayerMutex.RUnlock()
|
||||
return globalObjectAPI
|
||||
}
|
||||
|
||||
func newCachedObjectLayerFn() CacheObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
|
||||
if globalSafeMode {
|
||||
return nil
|
||||
}
|
||||
globalObjLayerMutex.RLock()
|
||||
defer globalObjLayerMutex.RUnlock()
|
||||
return globalCacheObjectAPI
|
||||
}
|
||||
|
||||
func setCacheObjectLayer(c CacheObjectLayer) {
|
||||
globalObjLayerMutex.Lock()
|
||||
globalCacheObjectAPI = c
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
||||
|
||||
func setObjectLayer(o ObjectLayer) {
|
||||
globalObjLayerMutex.Lock()
|
||||
globalObjectAPI = o
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
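The accessors above switch the read paths from Lock to RLock and add dedicated setters. A minimal sketch of the pattern, with our own stand-in global instead of MinIO's object layer:

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	layer string
)

// getLayer takes the read lock so concurrent readers do not serialize.
func getLayer() string {
	mu.RLock()
	defer mu.RUnlock()
	return layer
}

// setLayer is the only writer and takes the exclusive lock.
func setLayer(l string) {
	mu.Lock()
	layer = l
	mu.Unlock()
}

func main() {
	setLayer("erasure")
	fmt.Println(getLayer())
}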
||||
|
||||
// objectAPIHandler implements and provides http handlers for S3 API.
|
||||
type objectAPIHandlers struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
CacheAPI func() CacheObjectLayer
|
||||
// Returns true if handlers should interpret encryption.
|
||||
EncryptionEnabled func() bool
|
||||
// Returns true if handlers allow SSE-KMS encryption headers.
|
||||
AllowSSEKMS func() bool
|
||||
}
|
||||
|
||||
// getHost tries its best to return the request host.
|
||||
// According to section 14.23 of RFC 2616 the Host header
|
||||
// can include the port number if the default value of 80 is not used.
|
||||
func getHost(r *http.Request) string {
|
||||
if r.URL.IsAbs() {
|
||||
return r.URL.Host
|
||||
}
|
||||
return r.Host
|
||||
}
|
||||
|
||||
// registerAPIRouter - registers S3 compatible APIs.
|
||||
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
|
||||
func registerAPIRouter(router *mux.Router) {
|
||||
// Initialize API.
|
||||
api := objectAPIHandlers{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
CacheAPI: newCachedObjectLayerFn,
|
||||
EncryptionEnabled: func() bool {
|
||||
return encryptionEnabled
|
||||
},
|
||||
AllowSSEKMS: func() bool {
|
||||
return allowSSEKMS
|
||||
},
|
||||
}
|
||||
|
||||
// API Router
|
||||
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
|
||||
|
||||
var routers []*mux.Router
|
||||
for _, domainName := range globalDomainNames {
|
||||
if globalMinioPort == "80" || globalMinioPort == "443" {
|
||||
// For standard ports it is possible that incoming requests
// do not carry a port; in such scenarios use
// wildcard routing.
|
||||
// FIXME: remove this code once https://github.com/gorilla/mux/pull/579
|
||||
// is merged and released upstream, as this entire change can become
|
||||
// a single line.
|
||||
if IsKubernetes() {
|
||||
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
|
||||
host, _, err := net.SplitHostPort(getHost(r))
|
||||
if err != nil {
|
||||
host = r.Host
|
||||
}
|
||||
// Make sure to skip matching minio.<domain>; this is
|
||||
// specifically meant for operator/k8s deployment
|
||||
// The reason we need to skip this is for a special
|
||||
// usecase where we need to make sure that
|
||||
// minio.<namespace>.svc.<cluster_domain> is ignored
|
||||
// by the bucketDNS style to ensure that path style
|
||||
// is available and honored at this domain.
|
||||
//
|
||||
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
|
||||
// makes sure that buckets are routed through this matcher
|
||||
// to match for `<bucket>`
|
||||
return host != minioReservedBucket+"."+domainName
|
||||
}).Host("{bucket:.+}."+domainName).Subrouter())
|
||||
} else {
|
||||
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
|
||||
}
|
||||
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
|
||||
}
|
||||
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
|
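A hedged sketch of the wildcard-host routing above using gorilla/mux (domain and handler are invented): the MatcherFunc rejects the reserved minio.<domain> host so path-style requests keep working there, while <bucket>.<domain> hosts are routed with the bucket captured from the host.

package main

import (
	"fmt"
	"net"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	const domain = "example.svc.cluster.local"
	r := mux.NewRouter()
	r.MatcherFunc(func(req *http.Request, _ *mux.RouteMatch) bool {
		host, _, err := net.SplitHostPort(req.Host)
		if err != nil {
			host = req.Host // no port present
		}
		return host != "minio."+domain // keep the reserved host path-style
	}).Host("{bucket:.+}." + domain).HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "bucket=%s\n", mux.Vars(req)["bucket"])
	})
	_ = http.ListenAndServe(":8080", r)
}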
||||
|
||||
@@ -101,220 +120,234 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
// Object operations
|
||||
// HeadObject
|
||||
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
|
||||
collectAPIStats("headobject", maxClients(httpTraceAll(api.HeadObjectHandler))))
|
||||
// CopyObjectPart
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").
|
||||
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
|
||||
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(httpTraceAll(api.CopyObjectPartHandler)))).
|
||||
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
// PutObjectPart
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("putobjectpart", maxClients(httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
// ListObjectParts
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("listobjectparts", maxClients(httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// CompleteMultipartUpload
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("completemutipartupload", maxClients(httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// NewMultipartUpload
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
|
||||
collectAPIStats("newmultipartupload", maxClients(httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
|
||||
// AbortMultipartUpload
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("abortmultipartupload", maxClients(httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// GetObjectACL - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("getobjectacl", maxClients(httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
|
||||
// PutObjectACL - this is a dummy call.
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("putobjectacl", maxClients(httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
|
||||
// GetObjectTagging
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("getobjecttagging", maxClients(httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// PutObjectTagging
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("putobjecttagging", maxClients(httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// DeleteObjectTagging
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("deleteobjecttagging", maxClients(httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// SelectObjectContent
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
|
||||
collectAPIStats("selectobjectcontent", maxClients(httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
|
||||
// GetObjectRetention
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
|
||||
collectAPIStats("getobjectretention", maxClients(httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
|
||||
// GetObjectLegalHold
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
collectAPIStats("getobjectlegalhold", maxClients(httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
// GetObject
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
|
||||
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
|
||||
// CopyObject
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
|
||||
collectAPIStats("copyobject", maxClients(httpTraceAll(api.CopyObjectHandler))))
|
||||
// PutObjectRetention
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
|
||||
collectAPIStats("putobjectretention", maxClients(httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
|
||||
// PutObjectLegalHold
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
collectAPIStats("putobjectlegalhold", maxClients(httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
|
||||
// PutObject
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
|
||||
collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectHandler))))
|
||||
// DeleteObject
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
|
||||
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))
|
||||
|
||||
/// Bucket operations
|
||||
// GetBucketLocation
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
|
||||
collectAPIStats("getbucketlocation", maxClients(httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
|
||||
// GetBucketPolicy
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
|
||||
collectAPIStats("getbucketpolicy", maxClients(httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
|
||||
// GetBucketLifecycle
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketEncryption
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
collectAPIStats("getbucketencryption", maxClients(httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// GetBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("getbucketobjectlockconfiguration", maxClients(httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// GetBucketReplicationConfig
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("getbucketreplicationconfiguration", maxClients(httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
|
||||
// GetBucketVersioning
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("getbucketversioning", maxClients(httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// GetBucketNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("getbucketnotification", maxClients(httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
|
||||
// ListenNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
|
||||
|
||||
// Dummy Bucket Calls
|
||||
// GetBucketACL -- this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("getbucketacl", maxClients(httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
|
||||
// PutBucketACL -- this is a dummy call.
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("putbucketacl", maxClients(httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
|
||||
// GetBucketCors - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
|
||||
collectAPIStats("getbucketcors", maxClients(httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
|
||||
// GetBucketWebsiteHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
|
||||
collectAPIStats("getbucketwebsite", maxClients(httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
|
||||
// GetBucketAccelerateHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
|
||||
collectAPIStats("getbucketaccelerate", maxClients(httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
|
||||
// GetBucketRequestPaymentHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
|
||||
collectAPIStats("getbucketrequestpayment", maxClients(httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
|
||||
// GetBucketLoggingHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
|
||||
collectAPIStats("getbucketlogging", maxClients(httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
|
||||
// GetBucketLifecycleHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketReplicationHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
|
||||
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketTaggingHandler
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("getbuckettagging", maxClients(httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
|
||||
//DeleteBucketWebsiteHandler
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
|
||||
collectAPIStats("deletebucketwebsite", maxClients(httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
|
||||
// DeleteBucketTaggingHandler
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("deletebuckettagging", maxClients(httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
|
||||
|
||||
// GetBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// GetBucketVersioning
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// GetBucketNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
|
||||
// ListenBucketNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
|
||||
// ListMultipartUploads
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
|
||||
collectAPIStats("listmultipartuploads", maxClients(httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
|
||||
// ListObjectsV2M
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
|
||||
collectAPIStats("listobjectsv2M", maxClients(httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
|
||||
// ListObjectsV2
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
|
||||
collectAPIStats("listobjectsv2", maxClients(httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
|
||||
// ListObjectVersions
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
|
||||
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
|
||||
// ListObjectsV1 (Legacy)
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
|
||||
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
|
||||
// PutBucketLifecycle
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// PutBucketReplicationConfig
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
collectAPIStats("putbucketreplicationconfiguration", maxClients(httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
// GetObjectRetention
|
||||
|
||||
// PutBucketEncryption
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
collectAPIStats("putbucketencryption", maxClients(httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
|
||||
// PutBucketPolicy
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
|
||||
collectAPIStats("putbucketpolicy", maxClients(httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
|
||||
|
||||
// PutBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
collectAPIStats("putbucketobjectlockconfig", maxClients(httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// PutBucketTaggingHandler
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("putbuckettagging", maxClients(httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
// PutBucketVersioning
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
collectAPIStats("putbucketversioning", maxClients(httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// PutBucketNotification
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
|
||||
collectAPIStats("putbucketnotification", maxClients(httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
|
||||
// PutBucket
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
|
||||
collectAPIStats("putbucket", maxClients(httpTraceAll(api.PutBucketHandler))))
|
||||
// HeadBucket
|
||||
bucket.Methods(http.MethodHead).HandlerFunc(
|
||||
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
|
||||
collectAPIStats("headbucket", maxClients(httpTraceAll(api.HeadBucketHandler))))
|
||||
// PostPolicy
|
||||
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
|
||||
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
|
||||
collectAPIStats("postpolicybucket", maxClients(httpTraceHdrs(api.PostPolicyBucketHandler))))
|
||||
// DeleteMultipleObjects
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
collectAPIStats("deletemultipleobjects", maxClients(httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
// DeleteBucketPolicy
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
collectAPIStats("deletebucketpolicy", maxClients(httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
// DeleteBucketReplication
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
collectAPIStats("deletebucketreplicationconfiguration", maxClients(httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
// DeleteBucketLifecycle
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("deletebucketlifecycle", maxClients(httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// DeleteBucketEncryption
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
collectAPIStats("deletebucketencryption", maxClients(httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
|
||||
// PostRestoreObject
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
|
||||
}
|
||||
|
||||
/// Root operation
|
||||
|
||||
// ListenNotification
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
|
||||
|
||||
// ListBuckets
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
|
||||
// than failing with UnknownAPIRequest we simply handle it for now.
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// Supports cors only for S3 handlers
|
||||
apiRouter.Methods(http.MethodOptions).Path(SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("cors", httpTraceAll(corsHandlerFunc()))))
|
||||
|
||||
apiRouter.Methods(http.MethodOptions).Path(SlashSeparator + SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("cors", httpTraceAll(corsHandlerFunc()))))
|
||||
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
|
||||
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
|
||||
|
||||
}
|
||||
|
||||
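Note: the recurring edit above swaps the wrapper order, moving collectAPIStats outside maxClients so that throttled requests are still counted. A minimal, self-contained sketch of that ordering effect (the wrapper internals here are stand-ins, not MinIO's actual implementations):

package main

import (
	"fmt"
	"net/http"
)

// collectAPIStats runs first, so every request for the named API is
// counted, including requests that maxClients later rejects.
func collectAPIStats(api string, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Printf("stats: %s\n", api) // counted even if throttled below
		next(w, r)
	}
}

// maxClients limits concurrency with a semaphore channel and rejects
// overflow requests instead of queueing them forever.
func maxClients(next http.HandlerFunc) http.HandlerFunc {
	sem := make(chan struct{}, 2) // tiny limit, for demonstration only
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			next(w, r)
		default:
			http.Error(w, "busy", http.StatusServiceUnavailable)
		}
	}
}

func main() {
	handler := collectAPIStats("listobjectsv2", maxClients(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	http.ListenAndServe(":8080", handler)
}

With the old order, maxClients(collectAPIStats(...)), a rejected request never reached the stats layer at all.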
-// setCorsHandler handler for CORS (Cross Origin Resource Sharing)
-func corsHandlerFunc() http.HandlerFunc {
+// corsHandler handler for CORS (Cross Origin Resource Sharing)
+func corsHandler(handler http.Handler) http.Handler {
	commonS3Headers := []string{
		xhttp.Date,
		xhttp.ETag,

@@ -325,12 +358,19 @@ func corsHandlerFunc() http.HandlerFunc {
		xhttp.ContentEncoding,
		xhttp.ContentLength,
		xhttp.ContentType,
		xhttp.ContentDisposition,
		xhttp.LastModified,
		xhttp.ContentLanguage,
		xhttp.CacheControl,
		xhttp.RetryAfter,
		xhttp.AmzBucketRegion,
		xhttp.Expires,
-		"X-Amz*",
+		"x-amz*",
+		"*",
	}

-	c := cors.New(cors.Options{
+	return cors.New(cors.Options{
		AllowOriginFunc: func(origin string) bool {
			for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
				if wildcard.MatchSimple(allowedOrigin, origin) {

@@ -351,7 +391,5 @@ func corsHandlerFunc() http.HandlerFunc {
		AllowedHeaders:   commonS3Headers,
		ExposedHeaders:   commonS3Headers,
		AllowCredentials: true,
-	})
-
-	return c.HandlerFunc
+	}).Handler(handler)
}
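Note: this refactor turns the per-route CORS handler factory into a middleware that wraps the whole router, which is why the explicit OPTIONS routes disappear above. A standalone sketch of the new shape, assuming github.com/rs/cors (the package the cors.New(cors.Options{...}) call appears to come from); the option values here are illustrative, not MinIO's:

package main

import (
	"net/http"

	"github.com/rs/cors"
)

func corsMiddleware(h http.Handler) http.Handler {
	c := cors.New(cors.Options{
		AllowedMethods:   []string{http.MethodGet, http.MethodPut},
		AllowCredentials: true,
	})
	// Wrapping the handler applies CORS (including preflight OPTIONS)
	// to every route, with no per-path OPTIONS registrations.
	return c.Handler(h)
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {})
	http.ListenAndServe(":8080", corsMiddleware(mux))
}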
cmd/auth-handler.go

@@ -151,9 +151,10 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
	return cred, claims, owner, ErrNone
}

-// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
-// It does not accept presigned or JWT or anonymous requests.
-func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
+// checkAdminRequestAuth checks for authentication and authorization for the incoming
+// request. It only accepts V2 and V4 requests. Presigned, JWT and anonymous requests
+// are automatically rejected.
+func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
	if s3Err != ErrNone {
		return cred, s3Err

@@ -213,15 +214,15 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},

	if globalPolicyOPA == nil {
		// If OPA is not set and if ldap claim key is set, allow the claim.
-		if _, ok := claims.Lookup(ldapUser); ok {
+		if _, ok := claims.MapClaims[ldapUser]; ok {
			return claims.Map(), nil
		}

		// If OPA is not set, session token should
		// have a policy and its mandatory, reject
		// requests without policy claim.
-		_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
-		_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
+		_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
+		_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
		if !pokOpenID && !pokSA {
			return nil, errAuthentication
		}

@@ -333,8 +334,12 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
		// Populate payload again to handle it in HTTP handler.
		r.Body = ioutil.NopCloser(bytes.NewReader(payload))
	}
+	if cred.AccessKey != "" {
+		logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
+	}

-	if cred.AccessKey == "" {
+	if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
+		// Anonymous checks are not meant for ListBuckets action
		if globalPolicySys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Action:          action,

@@ -346,8 +351,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
			// Request is allowed return the appropriate access key.
			return cred.AccessKey, owner, ErrNone
		}
+
+		if action == policy.ListBucketVersionsAction {
+			// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
+			// verify as a fallback.
+			if globalPolicySys.IsAllowed(policy.Args{
+				AccountName:     cred.AccessKey,
+				Action:          policy.ListBucketAction,
+				BucketName:      bucketName,
+				ConditionValues: getConditionValues(r, locationConstraint, "", nil),
+				IsOwner:         false,
+				ObjectName:      objectName,
+			}) {
+				// Request is allowed return the appropriate access key.
+				return cred.AccessKey, owner, ErrNone
+			}
+		}
+
		return cred.AccessKey, owner, ErrAccessDenied
	}

	if globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName: cred.AccessKey,
		Action:      iampolicy.Action(action),

@@ -360,6 +383,24 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
		// Request is allowed return the appropriate access key.
		return cred.AccessKey, owner, ErrNone
	}
+
+	if action == policy.ListBucketVersionsAction {
+		// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
+		// verify as a fallback.
+		if globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     cred.AccessKey,
+			Action:          iampolicy.ListBucketAction,
+			BucketName:      bucketName,
+			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+			ObjectName:      objectName,
+			IsOwner:         owner,
+			Claims:          claims,
+		}) {
+			// Request is allowed return the appropriate access key.
+			return cred.AccessKey, owner, ErrNone
+		}
+	}
+
	return cred.AccessKey, owner, ErrAccessDenied
}

@@ -418,8 +459,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty

	// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
	// The verification happens implicit during reading.
-	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
-		hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
+	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
	if err != nil {
		return toAPIErrorCode(ctx, err)
	}

@@ -427,16 +467,6 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
	return ErrNone
}
-// authHandler - handles all the incoming authorization headers and validates them if possible.
-type authHandler struct {
-	handler http.Handler
-}
-
-// setAuthHandler to validate authorization header for the incoming request.
-func setAuthHandler(h http.Handler) http.Handler {
-	return authHandler{h}
-}

// List of all support S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
	authTypeAnonymous: {},

@@ -454,26 +484,30 @@ func isSupportedS3AuthType(aType authType) bool {
	return ok
}

-// handler for validating incoming authorization headers.
-func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	aType := getRequestAuthType(r)
-	if isSupportedS3AuthType(aType) {
-		// Let top level caller validate for anonymous and known signed requests.
-		a.handler.ServeHTTP(w, r)
-		return
-	} else if aType == authTypeJWT {
-		// Validate Authorization header if its valid for JWT request.
-		if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
-			w.WriteHeader(http.StatusUnauthorized)
+// setAuthHandler to validate authorization header for the incoming request.
+func setAuthHandler(h http.Handler) http.Handler {
+	// handler for validating incoming authorization headers.
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		aType := getRequestAuthType(r)
+		if isSupportedS3AuthType(aType) {
+			// Let top level caller validate for anonymous and known signed requests.
+			h.ServeHTTP(w, r)
+			return
+		} else if aType == authTypeJWT {
+			// Validate Authorization header if its valid for JWT request.
+			if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
+				w.WriteHeader(http.StatusUnauthorized)
				w.Write([]byte(authErr.Error()))
				return
			}
+			h.ServeHTTP(w, r)
+			return
+		} else if aType == authTypeSTS {
+			h.ServeHTTP(w, r)
+			return
+		}
-		a.handler.ServeHTTP(w, r)
-		return
-	} else if aType == authTypeSTS {
-		a.handler.ServeHTTP(w, r)
-		return
-	}
-	writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
+		writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
+	})
}
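Note: the change above is a common Go refactor, replacing a single-purpose middleware struct with a closure returned as http.HandlerFunc. A minimal before/after sketch (compiles on its own; names mirror the diff but the bodies are trimmed):

package demo

import "net/http"

// Before: a named type exists only to carry the wrapped handler.
type authHandler struct{ handler http.Handler }

func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// ... validation elided ...
	a.handler.ServeHTTP(w, r)
}

// After: same behavior, no extra type; h is captured by the closure.
func setAuthHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ... validation elided ...
		h.ServeHTTP(w, r)
	})
}

The closure form keeps the validation logic and the wrapping in one place, which is why the struct, its constructor, and its ServeHTTP method all collapse into one function in the diff.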
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {

@@ -519,7 +553,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
	if retMode == objectlock.RetGovernance && byPassSet {
		byPassSet = globalPolicySys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
-			Action:          policy.Action(policy.BypassGovernanceRetentionAction),
+			Action:          policy.BypassGovernanceRetentionAction,
			BucketName:      bucketName,
			ConditionValues: conditions,
			IsOwner:         false,

@@ -528,7 +562,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
	}
	if globalPolicySys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
-		Action:          policy.Action(policy.PutObjectRetentionAction),
+		Action:          policy.PutObjectRetentionAction,
		BucketName:      bucketName,
		ConditionValues: conditions,
		IsOwner:         false,

@@ -551,7 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
	if retMode == objectlock.RetGovernance && byPassSet {
		byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     cred.AccessKey,
-			Action:          policy.BypassGovernanceRetentionAction,
+			Action:          iampolicy.BypassGovernanceRetentionAction,
			BucketName:      bucketName,
			ObjectName:      objectName,
			ConditionValues: conditions,

@@ -561,7 +595,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
	}
	if globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
-		Action:          policy.PutObjectRetentionAction,
+		Action:          iampolicy.PutObjectRetentionAction,
		BucketName:      bucketName,
		ConditionValues: conditions,
		ObjectName:      objectName,

@@ -579,7 +613,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
// call verifies bucket policies and IAM policies, supports multi user
// checks etc.
-func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
+func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
	var cred auth.Credentials
	var owner bool
	switch atype {

@@ -600,6 +634,10 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
		return s3Err
	}

+	if cred.AccessKey != "" {
+		logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
+	}
+
	// Do not check for PutObjectRetentionAction permission,
	// if mode and retain until date are not set.
	// Can happen when bucket has default lock config set
cmd/auth-handler_test.go

@@ -52,7 +52,7 @@ func TestGetRequestAuthType(t *testing.T) {
					"X-Amz-Content-Sha256": []string{streamingContentSHA256},
					"Content-Encoding":     []string{streamingContentEncoding},
				},
-				Method: "PUT",
+				Method: http.MethodPut,
			},
			authT: authTypeStreamingSigned,
		},

@@ -111,7 +111,7 @@ func TestGetRequestAuthType(t *testing.T) {
				Header: http.Header{
					"Content-Type": []string{"multipart/form-data"},
				},
-				Method: "POST",
+				Method: http.MethodPost,
			},
			authT: authTypePostPolicy,
		},

@@ -212,7 +212,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
	for i, testCase := range testCases {
		// creating an input HTTP request.
		// Only the query parameters are relevant for this particular test.
-		inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+		inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
		if err != nil {
			t.Fatalf("Error initializing input HTTP request: %v", err)
		}

@@ -246,7 +246,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
	for i, testCase := range testCases {
		// creating an input HTTP request.
		// Only the query parameters are relevant for this particular test.
-		inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+		inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
		if err != nil {
			t.Fatalf("Error initializing input HTTP request: %v", err)
		}

@@ -369,15 +369,15 @@ func TestIsReqAuthenticated(t *testing.T) {
		s3Error APIErrorCode
	}{
		// When request is unsigned, access denied is returned.
-		{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
+		{mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
		// Empty Content-Md5 header.
-		{mustNewSignedEmptyMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
+		{mustNewSignedEmptyMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
		// Short Content-Md5 header.
-		{mustNewSignedShortMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
+		{mustNewSignedShortMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
		// When request is properly signed, but has bad Content-MD5 header.
-		{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
+		{mustNewSignedBadMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
		// When request is properly signed, error is none.
-		{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
+		{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
	}

	ctx := context.Background()

@@ -413,15 +413,15 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
		Request *http.Request
		ErrCode APIErrorCode
	}{
-		{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
-		{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
-		{Request: mustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
-		{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
-		{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+		{Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+		{Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
+		{Request: mustNewSignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+		{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+		{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
	}
	ctx := context.Background()
	for i, testCase := range testCases {
-		if _, s3Error := checkAdminRequestAuthType(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
+		if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
		}
	}

@@ -461,7 +461,7 @@ func TestValidateAdminSignature(t *testing.T) {
	}

	for i, testCase := range testCases {
-		req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
+		req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
		if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
			t.Fatalf("Unable to inititalized new signed http request %s", err)
		}
cmd/background-heal-ops.go

@@ -18,7 +18,6 @@ package cmd

import (
	"context"
-	"path"
	"time"

	"github.com/minio/minio/cmd/logger"

@@ -55,15 +54,39 @@ func (h *healRoutine) queueHealTask(task healTask) {
	h.tasks <- task
}

-func waitForLowHTTPReq(tolerance int32) {
+func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
+	// No need to wait run at full speed.
+	if maxIO <= 0 {
+		return
+	}
+
+	// At max 10 attempts to wait with 100 millisecond interval before proceeding
+	waitTick := 100 * time.Millisecond
+
+	// Bucket notification and http trace are not costly, it is okay to ignore them
+	// while counting the number of concurrent connections
+	maxIOFn := func() int {
+		return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalHTTPTrace.NumSubscribers())
+	}
+
+	tmpMaxWait := maxWait
	if httpServer := newHTTPServerFn(); httpServer != nil {
-		// Wait at max 10 minute for an inprogress request before proceeding to heal
-		waitCount := 600
-		// Any requests in progress, delay the heal.
-		for (httpServer.GetRequestCount() >= tolerance) &&
-			waitCount > 0 {
-			waitCount--
-			time.Sleep(1 * time.Second)
+		for httpServer.GetRequestCount() >= maxIOFn() {
+			if tmpMaxWait > 0 {
+				if tmpMaxWait < waitTick {
+					time.Sleep(tmpMaxWait)
+				} else {
+					time.Sleep(waitTick)
+				}
+				tmpMaxWait = tmpMaxWait - waitTick
+			}
+			if tmpMaxWait <= 0 {
+				if intDataUpdateTracker.debug {
+					logger.Info("waitForLowHTTPReq: waited max %s, resuming", maxWait)
+				}
+				break
+			}
		}
	}
}

@@ -74,28 +97,25 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
		select {
		case task, ok := <-h.tasks:
			if !ok {
-				break
+				return
			}

-			// Wait and proceed if there are active requests
-			waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
-
			var res madmin.HealResultItem
			var err error
-			switch {
-			case task.bucket == nopHeal:
+			switch task.bucket {
+			case nopHeal:
				continue
-			case task.bucket == SlashSeparator:
+			case SlashSeparator:
				res, err = healDiskFormat(ctx, objAPI, task.opts)
-			case task.bucket != "" && task.object == "":
-				res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove)
-			case task.bucket != "" && task.object != "":
-				res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
-			}
-			if task.bucket != "" && task.object != "" {
-				ObjectPathUpdated(path.Join(task.bucket, task.object))
+			default:
+				if task.object == "" {
+					res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
+				} else {
+					res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
+				}
			}
			task.responseCh <- healResult{result: res, err: err}

		case <-h.doneCh:
			return
		case <-ctx.Done():

@@ -112,24 +132,6 @@ func newHealRoutine() *healRoutine {

}

-func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
-	// Run the background healer
-	globalBackgroundHealRoutine = newHealRoutine()
-	go globalBackgroundHealRoutine.run(ctx, objAPI)
-
-	nh := newBgHealSequence()
-	// Heal any disk format and metadata early, if possible.
-	if err := nh.healDiskMeta(); err != nil {
-		if newObjectLayerFn() != nil {
-			// log only in situations, when object layer
-			// has fully initialized.
-			logger.LogIf(nh.ctx, err)
-		}
-	}
-
-	globalBackgroundHealState.LaunchNewHealSequence(nh)
-}
-
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {

@@ -141,24 +143,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
		return madmin.HealResultItem{}, err
	}

-	// Healing succeeded notify the peers to reload format and re-initialize disks.
-	// We will not notify peers if healing is not required.
-	if err == nil {
-		// Notify servers in background and retry if needed.
-		go func() {
-		retry:
-			for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
-				if nerr.Err != nil {
-					if nerr.Err.Error() == errServerNotInitialized.Error() {
-						time.Sleep(time.Second)
-						goto retry
-					}
-					logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
-					logger.LogIf(ctx, nerr.Err)
-				}
-			}
-		}()
-	}
-
	return res, nil
}
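Note: the rewritten waitForLowHTTPReq replaces a fixed 600-second countdown with a caller-supplied budget that drains in small ticks. A self-contained sketch of that bounded-wait pattern (requestCount stands in for httpServer.GetRequestCount(); everything else is illustrative):

package demo

import "time"

func waitForLowLoad(maxIO int, maxWait time.Duration, requestCount func() int) {
	if maxIO <= 0 {
		return // no limit configured, run at full speed
	}
	const waitTick = 100 * time.Millisecond
	remaining := maxWait
	for requestCount() >= maxIO {
		if remaining <= 0 {
			return // budget exhausted; proceed with the heal anyway
		}
		sleep := waitTick
		if remaining < waitTick {
			sleep = remaining // never oversleep the remaining budget
		}
		time.Sleep(sleep)
		remaining -= sleep
	}
}

Short ticks keep the loop responsive to load dropping, while the budget guarantees healing is only delayed, never starved.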
cmd/background-newdisks-heal-ops.go

@@ -18,106 +18,208 @@ package cmd

import (
	"context"
+	"errors"
+	"fmt"
+	"sort"
	"time"

+	"github.com/dustin/go-humanize"
	"github.com/minio/minio/cmd/logger"
+	"github.com/minio/minio/pkg/color"
+	"github.com/minio/minio/pkg/console"
)

-const defaultMonitorNewDiskInterval = time.Minute * 10
+const (
+	defaultMonitorNewDiskInterval = time.Second * 10
+	healingTrackerFilename        = ".healing.bin"
+)

-func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
-	go monitorLocalDisksAndHeal(ctx, objAPI)
-}
+//go:generate msgp -file $GOFILE -unexported
+type healingTracker struct {
+	ID string
+
+	// future add more tracking capabilities
+}

+func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
+	z, ok := objAPI.(*erasureServerPools)
+	if !ok {
+		return
+	}
+
+	initBackgroundHealing(ctx, objAPI) // start quick background healing
+
+	bgSeq := mustGetHealSequence(ctx)
+
+	globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
+
+	if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
+		logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
+			drivesToHeal, defaultMonitorNewDiskInterval))
+
+		// Heal any disk format and metadata early, if possible.
+		// Start with format healing
+		if err := bgSeq.healDiskFormat(); err != nil {
+			if newObjectLayerFn() != nil {
+				// log only in situations, when object layer
+				// has fully initialized.
+				logger.LogIf(bgSeq.ctx, err)
+			}
+		}
+	}
+
+	if err := bgSeq.healDiskMeta(objAPI); err != nil {
+		if newObjectLayerFn() != nil {
+			// log only in situations, when object layer
+			// has fully initialized.
+			logger.LogIf(bgSeq.ctx, err)
+		}
+	}
+
+	go monitorLocalDisksAndHeal(ctx, z, bgSeq)
+}
+
+func getLocalDisksToHeal() (disksToHeal Endpoints) {
+	for _, ep := range globalEndpoints {
+		for _, endpoint := range ep.Endpoints {
+			if !endpoint.IsLocal {
+				continue
+			}
+			// Try to connect to the current endpoint
+			// and reformat if the current disk is not formatted
+			disk, _, err := connectEndpoint(endpoint)
+			if errors.Is(err, errUnformattedDisk) {
+				disksToHeal = append(disksToHeal, endpoint)
+			} else if err == nil && disk != nil && disk.Healing() {
+				disksToHeal = append(disksToHeal, disk.Endpoint())
+			}
+		}
+	}
+	return disksToHeal
+}
+
+func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
+	// Run the background healer
+	globalBackgroundHealRoutine = newHealRoutine()
+	go globalBackgroundHealRoutine.run(ctx, objAPI)
+
+	globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
+}
+
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
-func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
-	z, ok := objAPI.(*erasureZones)
-	if !ok {
-		return
-	}
-
-	var bgSeq *healSequence
-	var found bool
-
-	for {
-		bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
-		if found {
-			break
-		}
-		time.Sleep(time.Second)
-	}
-
+func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
	// Perform automatic disk healing when a disk is replaced locally.
+	diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
+	defer diskCheckTimer.Stop()
+wait:
	for {
		select {
		case <-ctx.Done():
			return
-		case <-time.After(defaultMonitorNewDiskInterval):
-			// Attempt a heal as the server starts-up first.
-			localDisksInZoneHeal := make([]Endpoints, len(z.zones))
-			var healNewDisks bool
-			for i, ep := range globalEndpoints {
-				localDisksToHeal := Endpoints{}
-				for _, endpoint := range ep.Endpoints {
-					if !endpoint.IsLocal {
-						continue
-					}
-					// Try to connect to the current endpoint
-					// and reformat if the current disk is not formatted
-					_, _, err := connectEndpoint(endpoint)
-					if err == errUnformattedDisk {
-						localDisksToHeal = append(localDisksToHeal, endpoint)
-					}
-				}
-				if len(localDisksToHeal) == 0 {
-					continue
-				}
-				localDisksInZoneHeal[i] = localDisksToHeal
-				healNewDisks = true
-			}
-
-			// Reformat disks only if needed.
-			if !healNewDisks {
-				continue
-			}
-
-			// Reformat disks
-			bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
-
-			// Ensure that reformatting disks is finished
-			bgSeq.sourceCh <- healSource{bucket: nopHeal}
-
-			var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
-			// Compute the list of erasure set to heal
-			for i, localDisksToHeal := range localDisksInZoneHeal {
-				var erasureSetToHeal []int
-				for _, endpoint := range localDisksToHeal {
-					// Load the new format of this passed endpoint
-					_, format, err := connectEndpoint(endpoint)
-					if err != nil {
-						logger.LogIf(ctx, err)
-						continue
-					}
-					// Calculate the set index where the current endpoint belongs
-					setIndex, _, err := findDiskIndex(z.zones[i].format, format)
-					if err != nil {
-						logger.LogIf(ctx, err)
-						continue
-					}
-
-					erasureSetToHeal = append(erasureSetToHeal, setIndex)
-				}
-				erasureSetInZoneToHeal[i] = erasureSetToHeal
-			}
-
-			// Heal all erasure sets that need
-			for i, erasureSetToHeal := range erasureSetInZoneToHeal {
-				for _, setIndex := range erasureSetToHeal {
-					err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
-					if err != nil {
-						logger.LogIf(ctx, err)
-					}
-				}
-			}
+		case <-diskCheckTimer.C:
+			// Reset to next interval.
+			diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
+
+			var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
+
+			healDisks := globalBackgroundHealState.getHealLocalDisks()
+			if len(healDisks) > 0 {
+				// Reformat disks
+				bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
+
+				// Ensure that reformatting disks is finished
+				bgSeq.sourceCh <- healSource{bucket: nopHeal}
+
+				logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
+					len(healDisks)))
+
+				erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
+				for i := range z.serverPools {
+					erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
+				}
+			}
+
+			if serverDebugLog {
+				console.Debugf(color.Green("healDisk:")+" disk check timer fired, attempting to heal %d drives\n", len(healDisks))
+			}
+
+			// heal only if new disks found.
+			for _, endpoint := range healDisks {
+				disk, format, err := connectEndpoint(endpoint)
+				if err != nil {
+					printEndpointError(endpoint, err, true)
+					continue
+				}
+
+				poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
+				if poolIdx < 0 {
+					continue
+				}
+
+				// Calculate the set index where the current endpoint belongs
+				z.serverPools[poolIdx].erasureDisksMu.RLock()
+				// Protect reading reference format.
+				setIndex, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
+				z.serverPools[poolIdx].erasureDisksMu.RUnlock()
+				if err != nil {
+					printEndpointError(endpoint, err, false)
+					continue
+				}
+
+				erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
+			}
+
+			buckets, _ := z.ListBuckets(ctx)
+
+			// Heal latest buckets first.
+			sort.Slice(buckets, func(i, j int) bool {
+				return buckets[i].Created.After(buckets[j].Created)
+			})
+
+			for i, setMap := range erasureSetInPoolDisksToHeal {
+				for setIndex, disks := range setMap {
+					for _, disk := range disks {
+						logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
+
+						// So someone changed the drives underneath, healing tracker missing.
+						if !disk.Healing() {
+							logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
+							diskID, err := disk.GetDiskID()
+							if err != nil {
+								logger.LogIf(ctx, err)
+								// reading format.json failed or not found, proceed to look
+								// for new disks to be healed again, we cannot proceed further.
+								goto wait
+							}
+
+							if err := saveHealingTracker(disk, diskID); err != nil {
+								logger.LogIf(ctx, err)
+								// Unable to write healing tracker, permission denied or some
+								// other unexpected error occurred. Proceed to look for new
+								// disks to be healed again, we cannot proceed further.
+								goto wait
+							}
+						}
+
+						err := z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets)
+						if err != nil {
+							logger.LogIf(ctx, err)
+							continue
+						}
+
+						logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
+
+						if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
+							healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
+							logger.LogIf(ctx, err)
+							continue
+						}
+
+						// Only upon success pop the healed disk.
+						globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
+					}
+				}
+			}
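Note: the new .healing.bin marker is the healingTracker struct serialized with msgp; the //go:generate directive above produces the _gen.go file shown next. A hedged sketch of the round trip, assuming the generated MarshalMsg/UnmarshalMsg methods below are in scope (the helper names and path handling here are illustrative, not MinIO's real saveHealingTracker):

package cmd

import "io/ioutil"

func saveTrackerToFile(path string, t healingTracker) error {
	buf, err := t.MarshalMsg(nil) // generated by `msgp -file $GOFILE -unexported`
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, buf, 0644)
}

func loadTrackerFromFile(path string) (healingTracker, error) {
	var t healingTracker
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		return t, err
	}
	_, err = t.UnmarshalMsg(buf) // leftover bytes ignored in this sketch
	return t, err
}

Because the marker survives restarts, a disk that was mid-heal when the drives were swapped can be detected (disk.Healing()) and re-tracked, which is exactly what the monitor loop above does.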
cmd/background-newdisks-heal-ops_gen.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
	"github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "ID":
			z.ID, err = dc.ReadString()
			if err != nil {
				err = msgp.WrapError(err, "ID")
				return
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 1
	// write "ID"
	err = en.Append(0x81, 0xa2, 0x49, 0x44)
	if err != nil {
		return
	}
	err = en.WriteString(z.ID)
	if err != nil {
		err = msgp.WrapError(err, "ID")
		return
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 1
	// string "ID"
	o = append(o, 0x81, 0xa2, 0x49, 0x44)
	o = msgp.AppendString(o, z.ID)
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "ID":
			z.ID, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "ID")
				return
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
	s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
	return
}
cmd/background-newdisks-heal-ops_gen_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
	"bytes"
	"testing"

	"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalhealingTracker(t *testing.T) {
	v := healingTracker{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsghealingTracker(b *testing.B) {
	v := healingTracker{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsghealingTracker(b *testing.B) {
	v := healingTracker{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalhealingTracker(b *testing.B) {
	v := healingTracker{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodehealingTracker(t *testing.T) {
	v := healingTracker{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
	}

	vn := healingTracker{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodehealingTracker(b *testing.B) {
	v := healingTracker{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodehealingTracker(b *testing.B) {
	v := healingTracker{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
@@ -55,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -114,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
md5hex := getMD5Hash([]byte(textPartData))
|
||||
var partInfo PartInfo
|
||||
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -175,56 +174,6 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
|
||||
runPutObjectBenchmarkParallel(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// Benchmark utility functions for ObjectLayer.GetObject().
|
||||
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
|
||||
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
textData := generateBytesData(objSize)
|
||||
|
||||
// generate etag for the generated data.
|
||||
// etag of the data to written is required as input for PutObject.
|
||||
// PutObject is the functions which writes the data onto the FS/Erasure backend.
|
||||
|
||||
// get text data generated for number of bytes equal to object size.
|
||||
md5hex := getMD5Hash(textData)
|
||||
sha256hex := ""
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if objInfo.ETag != md5hex {
|
||||
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
|
||||
}
|
||||
}
|
||||
|
||||
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
|
||||
b.ReportAllocs()
|
||||
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
var buffer = new(bytes.Buffer)
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
}
|
||||
// Benchmark ends here. Stop timer.
|
||||
b.StopTimer()
|
||||
|
||||
}
|
||||
|
||||
// randomly picks a character and returns its equivalent byte array.
|
||||
func getRandomByte() []byte {
|
||||
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
@@ -240,38 +189,6 @@ func generateBytesData(size int) []byte {
|
||||
return bytes.Repeat(getRandomByte(), size)
|
||||
}
|
||||
|
||||
// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
|
||||
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp Erasure/FS backend.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runGetObjectBenchmark(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
|
||||
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp Erasure/FS backend.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runGetObjectBenchmarkParallel(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// Parallel benchmark utility functions for ObjectLayer.PutObject().
|
||||
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
|
||||
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
@@ -301,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
for pb.Next() {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -315,58 +232,3 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// Benchmark ends here. Stop timer.
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
// Parallel benchmark utility functions for ObjectLayer.GetObject().
|
||||
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
|
||||
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// get text data generated for number of bytes equal to object size.
|
||||
textData := generateBytesData(objSize)
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to written is required as input for PutObject.
|
||||
// PutObject is the functions which writes the data onto the FS/Erasure backend.
|
||||
|
||||
md5hex := getMD5Hash([]byte(textData))
|
||||
sha256hex := ""
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if objInfo.ETag != md5hex {
|
||||
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
|
||||
}
|
||||
}
|
||||
|
||||
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
|
||||
b.ReportAllocs()
|
||||
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
i := 0
|
||||
for pb.Next() {
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
i++
|
||||
if i == 10 {
|
||||
i = 0
|
||||
}
|
||||
}
|
||||
})
|
||||
// Benchmark ends here. Stop timer.
|
||||
b.StopTimer()
|
||||
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
@@ -80,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize = bitrotSumsTotalSize + length
|
||||
}
|
||||
err := disk.CreateFile(volume, filePath, totalFileSize, r)
|
||||
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
|
||||
r.CloseWithError(err)
|
||||
close(bw.canClose)
|
||||
}()
|
||||
@@ -90,7 +91,8 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
|
||||
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
|
||||
type streamingBitrotReader struct {
|
||||
disk StorageAPI
|
||||
rc io.ReadCloser
|
||||
data []byte
|
||||
rc io.Reader
|
||||
volume string
|
||||
filePath string
|
||||
tillOffset int64
|
||||
@@ -104,7 +106,10 @@ func (b *streamingBitrotReader) Close() error {
|
||||
if b.rc == nil {
|
||||
return nil
|
||||
}
|
||||
return b.rc.Close()
|
||||
if closer, ok := b.rc.(io.Closer); ok {
|
||||
return closer.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
@@ -118,11 +123,16 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
// For the first ReadAt() call we need to open the stream for reading.
|
||||
b.currOffset = offset
|
||||
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
|
||||
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
if len(b.data) == 0 {
|
||||
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
} else {
|
||||
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if offset != b.currOffset {
|
||||
// Can never happen unless there are programmer bugs
|
||||
return 0, errUnexpected
|
||||
@@ -139,20 +149,20 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
|
||||
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
|
||||
logger.LogIf(GlobalContext, err)
|
||||
return 0, err
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
|
||||
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
|
||||
return 0, errFileCorrupt
|
||||
}
|
||||
b.currOffset += int64(len(buf))
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
// Returns streaming bitrot reader implementation.
|
||||
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
|
||||
func newStreamingBitrotReader(disk StorageAPI, data []byte, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
|
||||
h := algo.New()
|
||||
return &streamingBitrotReader{
|
||||
disk,
|
||||
data,
|
||||
nil,
|
||||
volume,
|
||||
filePath,
|
||||
|
||||
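The hunks above thread a new `data []byte` through `streamingBitrotReader`: when an in-memory copy of the shard is available, reads are served from it instead of opening a disk stream. A minimal, self-contained sketch of that fallback pattern, assuming only the standard library (`shardReader` and its `open` field are illustrative stand-ins for `streamingBitrotReader` and `disk.ReadFileStream`, and this sketch keeps only the first sequential-read case):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// shardReader prefers an in-memory copy of the shard (data) and only falls
// back to opening a stream when data is empty, mirroring the fallback added
// to streamingBitrotReader in the diff above.
type shardReader struct {
	data []byte
	open func(off, length int64) (io.Reader, error) // stand-in for disk.ReadFileStream
	rc   io.Reader
}

func (r *shardReader) ReadAt(p []byte, off int64) (int, error) {
	if r.rc == nil {
		if len(r.data) > 0 {
			// Serve the read from memory; no disk round trip.
			r.rc = io.NewSectionReader(bytes.NewReader(r.data), off, int64(len(r.data))-off)
		} else {
			var err error
			if r.rc, err = r.open(off, int64(len(p))); err != nil {
				return 0, err
			}
		}
	}
	return io.ReadFull(r.rc, p)
}

func main() {
	r := &shardReader{data: []byte("hello world")}
	buf := make([]byte, 5)
	n, err := r.ReadAt(buf, 6)
	fmt.Println(n, string(buf), err) // 5 world <nil>
}
```

Because `rc` is now an `io.Reader` that may be a plain in-memory reader, `Close()` in the diff type-asserts to `io.Closer` before closing, which the sketch omits for brevity.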
@@ -17,6 +17,8 @@
package cmd

import (
"context"
"fmt"
"hash"
"io"

@@ -33,14 +35,14 @@ type wholeBitrotWriter struct {
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
}
return len(p), nil
@@ -68,15 +70,13 @@ type wholeBitrotReader struct {
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, errLessData)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
return 0, errLessData
}
n = copy(buf, b.buf)

@@ -17,13 +17,13 @@
package cmd

import (
"crypto/sha256"
"errors"
"hash"
"io"

"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)

@@ -103,9 +103,9 @@ func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, alg
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}

func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
return newStreamingBitrotReader(disk, data, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}

@@ -17,9 +17,9 @@
package cmd

import (
"context"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
@@ -27,53 +27,53 @@ import (
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)

volume := "testvol"
filePath := "testfile"

disk, err := newXLStorage(tmpDir, "")
disk, err := newLocalXLStorage(tmpDir)
if err != nil {
t.Fatal(err)
}

disk.MakeVol(volume)
disk.MakeVol(context.Background(), volume)

writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)

_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
writer.(io.Closer).Close()

reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
t.Fatal(err)
}
}


@@ -18,9 +18,7 @@ package cmd

import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -29,7 +27,7 @@ import (
"time"

"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/rest"
@@ -54,7 +52,7 @@ type bootstrapRESTServer struct{}
type ServerSystemConfig struct {
MinioPlatform string
MinioRuntime string
MinioEndpoints EndpointZones
MinioEndpoints EndpointServerPools
}

// Diff - returns error on first difference found in two configs.
@@ -131,7 +129,7 @@ func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method s
values = make(url.Values)
}

respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
@@ -161,9 +159,9 @@ func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSyst
return srcCfg.Diff(recvCfg)
}

func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones) error {
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointZones)
clnts := newBootstrapRESTClients(endpointServerPools)
var onlineServers int
var offlineEndpoints []string
var retries int
@@ -178,25 +176,30 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
}
onlineServers++
}
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(500 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
select {
case <-ctx.Done():
return ctx.Err()
default:
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(100 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
}
offlineEndpoints = nil
}
return nil
}

func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient {
func newBootstrapRESTClients(endpointServerPools EndpointServerPools) []*bootstrapRESTClient {
seenHosts := set.NewStringSet()
var clnts []*bootstrapRESTClient
for _, ep := range endpointZones {
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
@@ -205,11 +208,7 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient

// Only proceed for remote endpoints.
if !endpoint.IsLocal {
clnt, err := newBootstrapRESTClient(endpoint)
if err != nil {
continue
}
clnts = append(clnts, clnt)
clnts = append(clnts, newBootstrapRESTClient(endpoint))
}
}
}
@@ -217,35 +216,15 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient
}

// Returns a new bootstrap client.
func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
serverURL := &url.URL{
Scheme: endpoint.Scheme,
Host: endpoint.Host,
Path: bootstrapRESTPath,
}

var tlsConfig *tls.Config
if globalIsSSL {
tlsConfig = &tls.Config{
ServerName: endpoint.Hostname(),
RootCAs: globalRootCAs,
}
}
restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
restClient.HealthCheckFn = nil

trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err
}

restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
respBody, err := restClient.CallWithContext(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
cancel()
var ne *rest.NetworkError
return !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &ne)
}

return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}, nil
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
}

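The reworked `verifyServerSystemConfig` wraps its retry body in a `select` on `ctx.Done()`, so bootstrap stops retrying as soon as the caller's context is cancelled instead of spinning forever. A small sketch of that context-aware retry loop, assuming only the standard library (`waitForQuorum` and `check` are illustrative names, not part of the codebase):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForQuorum retries until check succeeds or the context is cancelled,
// mirroring the select/ctx.Done() loop introduced above.
func waitForQuorum(ctx context.Context, check func() bool) error {
	retries := 0
	for !check() {
		select {
		case <-ctx.Done():
			return ctx.Err() // stop retrying once the caller gives up
		default:
			time.Sleep(100 * time.Millisecond) // avoid a hot spin loop
			retries++
			if retries >= 5 {
				fmt.Println("still waiting for remote servers...")
				retries = 0 // log again after another 5 attempts
			}
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := waitForQuorum(ctx, func() bool { return false })
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```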
@@ -37,7 +37,7 @@ const (
func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketEncryption")

defer logger.AuditLog(w, r, "PutBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -102,7 +102,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketEncryption")

defer logger.AuditLog(w, r, "GetBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -145,7 +145,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketEncryption")

defer logger.AuditLog(w, r, "DeleteBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {

@@ -34,7 +34,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -22,22 +22,28 @@ import (
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"

"github.com/google/uuid"
"github.com/gorilla/mux"

"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
@@ -46,8 +52,9 @@ import (
)

const (
objectLockConfig = "object-lock.xml"
bucketTaggingConfig = "tagging.xml"
objectLockConfig = "object-lock.xml"
bucketTaggingConfig = "tagging.xml"
bucketReplicationConfig = "replication.xml"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -68,7 +75,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
logger.LogIf(GlobalContext, err)
return
}
@@ -76,38 +83,53 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
bucketsSet := set.NewStringSet()
bucketsToBeUpdated := set.NewStringSet()
bucketsInConflict := set.NewStringSet()
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.

// This means that domain is updated, we should update
// all bucket entries with new domain name.
domainMissing := err == dns.ErrDomainMissing
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() && !domainMissing {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
//
// Additionally also check if domain is updated/missing with more
// entries, if that is the case we should update the
// new domain entries as well.
continue
}

// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
}

// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}

// Add/update buckets that are not registered with the DNS
g := errgroup.WithNErrs(len(buckets))
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()

for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
@@ -115,16 +137,16 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}

for _, err := range g.Wait() {
if err != nil {
logger.LogIf(GlobalContext, err)
}
if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
}

for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}

var wg sync.WaitGroup
// Remove buckets that are in DNS for this server, but aren't local
for bucket, records := range dnsBuckets {
if bucketsSet.Contains(bucket) {
@@ -136,13 +158,18 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
continue
}

// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err = globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
wg.Add(1)
go func(bucket string) {
defer wg.Done()
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}(bucket)
}
wg.Wait()
}

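The federation cleanup above switches the DNS-entry deletions from a serial loop to one goroutine per entry joined by a `sync.WaitGroup`, so one slow etcd delete no longer blocks the rest. A minimal sketch of that fan-out, assuming only the standard library (`deleteAll` and `del` are illustrative stand-ins for the loop body and `globalDNSConfig.Delete`):

```go
package main

import (
	"fmt"
	"sync"
)

// deleteAll issues one goroutine per DNS entry and waits for all of them,
// mirroring the sync.WaitGroup fan-out introduced in initFederatorBackend.
func deleteAll(buckets []string, del func(string) error) {
	var wg sync.WaitGroup
	for _, bucket := range buckets {
		wg.Add(1)
		go func(bucket string) { // pass the loop variable so each goroutine gets its own copy
			defer wg.Done()
			if err := del(bucket); err != nil {
				fmt.Printf("Failed to remove DNS entry for %s due to %v\n", bucket, err)
			}
		}(bucket)
	}
	wg.Wait()
}

func main() {
	deleteAll([]string{"a", "b", "c"}, func(b string) error {
		fmt.Println("deleted", b)
		return nil
	})
}
```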
// GetBucketLocationHandler - GET Bucket location.
@@ -151,7 +178,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLocation")

defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -199,7 +226,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")

defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -254,7 +281,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBuckets")

defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -274,7 +301,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
var bucketsInfo []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -284,6 +313,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Created: dnsRecords[0].CreationDate,
})
}

sort.Slice(bucketsInfo, func(i, j int) bool {
return bucketsInfo[i].Name < bucketsInfo[j].Name
})

} else {
// Invoke the list buckets.
var err error
@@ -341,7 +375,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteMultipleObjects")

defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -377,6 +411,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")

// Before proceeding validate if bucket exists.
_, err := objectAPI.GetBucketInfo(ctx, bucket)
if err != nil {
@@ -389,12 +427,29 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

// Return Malformed XML as S3 spec if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}

var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
}

var (
hasLockEnabled, hasLifecycleConfig, replicateSync bool
goi ObjectInfo
gerr error
)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
hasLockEnabled = true
}
if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
hasLifecycleConfig = true
}
dErrs := make([]DeleteError, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
@@ -411,10 +466,55 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
continue
}
if object.VersionID != "" && object.VersionID != nullVersionID {
if _, err := uuid.Parse(object.VersionID); err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
}
}

if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, ObjectOptions{
VersionID: object.VersionID,
})
}
if hasLifecycleConfig && gerr == nil {
object.PurgeTransitioned = goi.TransitionStatus
}
if replicateDeletes {
delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
}, goi, gerr)
replicateSync = repsync
if replicate {
if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
return
}
continue
}
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
if delMarker {
object.DeleteMarkerVersionID = object.VersionID
}
} else {
object.DeleteMarkerReplicationStatus = string(replication.Pending)
}
}
}
if object.VersionID != "" {
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone {
if hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
@@ -445,17 +545,27 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})

deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[deleteList[i]]
apiErr := toAPIError(ctx, errs[i])
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
dindex := objectsToDelete[ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
PurgeTransitioned: dObjects[i].PurgeTransitioned,
}]
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
if replicateDeletes {
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
dObjects[i].VersionPurgeStatus = deleteList[i].VersionPurgeStatus
}
deletedObjects[dindex] = dObjects[i]
continue
}
apiErr := toAPIError(ctx, errs[i])
dErrs[dindex] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
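The remapping above keys `objectsToDelete` by the full `ObjectToDelete` value rather than by position, so each delete result can be matched back to its slot in the original request. That works because a struct of comparable fields is itself comparable in Go and can key a map directly. A trimmed sketch of the idea (this `ObjectToDelete` is a two-field stand-in for the real type, which carries more fields):

```go
package main

import "fmt"

// ObjectToDelete is a reduced stand-in for the cmd.ObjectToDelete key type;
// being a comparable struct, it can key a map directly.
type ObjectToDelete struct {
	ObjectName string
	VersionID  string
}

func main() {
	requests := []ObjectToDelete{
		{ObjectName: "a.txt"},
		{ObjectName: "b.txt", VersionID: "v1"},
	}
	// Remember the original position of every request.
	index := make(map[ObjectToDelete]int, len(requests))
	for i, o := range requests {
		index[o] = i
	}
	// Results can come back in any order; the struct key recovers the slot.
	result := ObjectToDelete{ObjectName: "b.txt", VersionID: "v1"}
	fmt.Println(index[result]) // 1
}
```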
@@ -477,22 +587,43 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)

// Notify deleted event for objects.
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
}

if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
dv := DeletedObjectVersionInfo{
DeletedObject: dobj,
Bucket: bucket,
}
scheduleReplicationDelete(ctx, dv, objectAPI, replicateSync)
}
}

if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
deleteTransitionedObject(ctx, objectAPI, bucket, dobj.ObjectName, lifecycle.ObjectOpts{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}, false, true)
}

eventName := event.ObjectRemovedDelete
objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
}

if dobj.DeleteMarker {
objInfo = ObjectInfo{
Name: dobj.ObjectName,
DeleteMarker: dobj.DeleteMarker,
VersionID: dobj.DeleteMarkerVersionID,
}
objInfo.DeleteMarker = dobj.DeleteMarker
objInfo.VersionID = dobj.DeleteMarkerVersionID
eventName = event.ObjectRemovedDeleteMarkerCreated
}

sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
EventName: eventName,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
@@ -509,7 +640,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucket")

defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -557,7 +688,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
if globalDNSConfig != nil {
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if err == dns.ErrNoEntriesFound {
// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
// exists elsewhere
if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -578,6 +711,16 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
getObjectLocation(r, globalDomainNames, bucket, ""))

writeSuccessResponseHeadersOnly(w)

sendEvent(eventArgs{
EventName: event.BucketCreated,
BucketName: bucket,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})

return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -610,6 +753,15 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.

writeSuccessResponseHeadersOnly(w)

sendEvent(eventArgs{
EventName: event.BucketCreated,
BucketName: bucket,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}

// PostPolicyBucketHandler - POST policy
@@ -619,7 +771,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostPolicyBucket")

defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -631,16 +783,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {

if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}

bucket := mux.Vars(r)["bucket"]

// To detect if the client has disconnected.
r.Body = &contextReader{r.Body, r.Context()}

// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
@@ -739,7 +889,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Make sure formValues adhere to policy restrictions.
if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrAccessDenied, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -761,27 +911,27 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
pReader := NewPutObjReader(rawReader)
var objectEncryptionKey crypto.ObjectKey

// Check if bucket encryption is enabled
if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
// This request header needs to be set prior to setting ObjectOptions
if !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
}
}

@@ -793,7 +943,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if objectAPI.IsEncryptionSupported() {
if crypto.IsRequested(formValues) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if _, ok := crypto.IsRequested(formValues); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
return
@@ -814,12 +964,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}

@@ -885,7 +1039,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadBucket")

defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -915,7 +1069,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucket")

defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -976,7 +1130,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -984,6 +1138,15 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

// Write success response.
writeSuccessNoContent(w)

sendEvent(eventArgs{
EventName: event.BucketRemoved,
BucketName: bucket,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}

// PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
@@ -994,7 +1157,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketObjectLockConfig")

defer logger.AuditLog(w, r, "PutBucketObjectLockConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1004,7 +1167,10 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -1047,7 +1213,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketObjectLockConfig")

defer logger.AuditLog(w, r, "GetBucketObjectLockConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1085,7 +1251,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketTagging")

defer logger.AuditLog(w, r, "PutBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1129,7 +1295,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")

defer logger.AuditLog(w, r, "GetBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1167,7 +1333,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketTagging")

defer logger.AuditLog(w, r, "DeleteBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1191,3 +1357,141 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
// ----------
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL, guessIsBrowserReq(r))
return
}
replicationConfig, err := replication.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
sameTarget, err := validateReplicationDestination(ctx, bucket, replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Validate the received bucket replication config
if err = replicationConfig.Validate(bucket, sameTarget); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
// ----------
// Gets the replication configuration for a bucket.
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationConfig")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseXML(w, configData)
}

// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
// ----------
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

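The new `PutBucketReplicationConfigHandler` enforces a strict guard order: server initialized, erasure mode, authorization, bucket existence, then versioning, before any configuration is parsed or persisted. A reduced sketch of that ordering, assuming only the standard library (all names here are illustrative, not the handler's actual signatures):

```go
package main

import (
	"errors"
	"fmt"
)

// configureReplication sketches the guard order the handler above enforces:
// the bucket must exist and have versioning enabled before any replication
// configuration is parsed or stored.
func configureReplication(bucketExists, versioningEnabled bool, store func() error) error {
	if !bucketExists {
		return errors.New("NoSuchBucket")
	}
	if !versioningEnabled {
		// Mirrors ErrReplicationNeedsVersioningError in the handler.
		return errors.New("replication requires bucket versioning to be enabled")
	}
	return store()
}

func main() {
	err := configureReplication(true, false, func() error { return nil })
	fmt.Println(err) // replication requires bucket versioning to be enabled
}
```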
@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -36,7 +35,7 @@ func TestRemoveBucketHandler(t *testing.T) {

func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
@@ -45,7 +44,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
req, err := newTestSignedRequestV4(http.MethodDelete, getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
@@ -61,7 +60,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodDelete, getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
@@ -129,7 +128,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location.
req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, err := newTestSignedRequestV4(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -161,7 +160,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -192,7 +191,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri

// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make any difference.
anonReq, err := newTestRequest("GET", getBucketLocationURL("", bucketName), 0, nil)
anonReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
@@ -208,7 +207,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.

nilBucket := "dummy-bucket"
nilReq, err := newTestRequest("GET", getBucketLocationURL("", nilBucket), 0, nil)
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -265,7 +264,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket.
req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, err := newTestSignedRequestV4(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -280,7 +279,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -295,7 +294,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
}

// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest("HEAD", getHEADBucketURL("", bucketName), 0, nil)
anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)

if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
@@ -313,7 +312,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.

nilBucket := "dummy-bucket"
nilReq, err := newTestRequest("HEAD", getHEADBucketURL("", nilBucket), 0, nil)
nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -482,7 +481,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, gerr := newTestSignedRequestV4(http.MethodGet, u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
}
@@ -499,7 +498,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for PUT bucket policy endpoint.

// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodGet, u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -516,7 +515,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
req, err := newTestSignedRequestV4(http.MethodGet, u, 0, nil, "", "", nil) // Generate an anonymous request.
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
}
@@ -530,7 +529,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
url := getListMultipartUploadsURLWithParams("", testCases[6].bucket, testCases[6].prefix, testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest("GET", url, 0, nil)
anonReq, err := newTestRequest(http.MethodGet, url, 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
@@ -550,7 +549,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
url = getListMultipartUploadsURLWithParams("", nilBucket, "dummy-prefix", testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)

nilReq, err := newTestRequest("GET", url, 0, nil)
nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -596,7 +595,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
req, lerr := newTestSignedRequestV4(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
if lerr != nil {
|
||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
|
||||
}
|
||||
@@ -613,7 +612,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
// construct HTTP request for PUT bucket policy endpoint.
|
||||
|
||||
// verify response for V2 signed HTTP request.
|
||||
reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
|
||||
@@ -628,7 +627,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
|
||||
// Test for Anonymous/unsigned http request.
|
||||
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
|
||||
anonReq, err := newTestRequest("GET", getListBucketURL(""), 0, nil)
|
||||
anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
|
||||
@@ -644,7 +643,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
|
||||
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
|
||||
|
||||
nilReq, err := newTestRequest("GET", getListBucketURL(""), 0, nil)
|
||||
nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
@@ -670,7 +669,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
for i := 0; i < 10; i++ {
|
||||
objectName := "test-object-" + strconv.Itoa(i)
|
||||
// uploading the object.
|
||||
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
|
||||
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
|
||||
@@ -808,10 +807,10 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
|
||||
// Generate a signed or anonymous request based on the testCase
|
||||
if testCase.accessKey != "" {
|
||||
req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
|
||||
req, err = newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
|
||||
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
|
||||
} else {
|
||||
req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
|
||||
req, err = newTestRequest(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
|
||||
int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
|
||||
}
|
||||
|
||||
@@ -837,7 +836,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
|
||||
// Verify whether the bucket obtained object is same as the one created.
|
||||
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
|
||||
fmt.Println(string(testCase.expectedContent), string(actualContent))
|
||||
t.Log(string(testCase.expectedContent), string(actualContent))
|
||||
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
|
||||
}
|
||||
}
|
||||
@@ -850,7 +849,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
nilBucket := "dummy-bucket"
|
||||
nilObject := ""
|
||||
|
||||
nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
|
||||
nilReq, err := newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
|
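The sweep above replaces raw method strings with the constants from net/http. As a standalone sketch (not part of the patch), the following shows the constants are the very same strings, so the change is purely about typo-safety and greppability:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.MethodGet and friends are string constants in net/http;
	// a misspelled constant name fails to compile, whereas a
	// misspelled literal like "GTE" compiles and fails at runtime.
	fmt.Println(http.MethodGet == "GET", http.MethodHead == "HEAD", http.MethodPost == "POST") // true true true
}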
@@ -38,7 +38,7 @@ const (
func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketLifecycle")

-	defer logger.AuditLog(w, r, "PutBucketLifecycle", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
@@ -78,6 +78,12 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

+	// Validate the transition storage ARNs
+	if err = validateLifecycleTransition(ctx, bucket, bucketLifecycle); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
	configData, err := xml.Marshal(bucketLifecycle)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -97,7 +103,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketLifecycle")

-	defer logger.AuditLog(w, r, "GetBucketLifecycle", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
@@ -139,7 +145,7 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucketLifecycle")

-	defer logger.AuditLog(w, r, "DeleteBucketLifecycle", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
@@ -50,7 +50,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
	}{
		// GET empty credentials
		{
-			method: "GET", bucketName: bucketName,
+			method: http.MethodGet, bucketName: bucketName,
			accessKey: "",
			secretKey: "",
			expectedRespStatus: http.StatusForbidden,
@@ -64,7 +64,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
		},
		// GET wrong credentials
		{
-			method: "GET", bucketName: bucketName,
+			method: http.MethodGet, bucketName: bucketName,
			accessKey: "abcd",
			secretKey: "abcd",
			expectedRespStatus: http.StatusForbidden,
@@ -78,7 +78,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
		},
		// PUT empty credentials
		{
-			method: "PUT",
+			method: http.MethodPut,
			bucketName: bucketName,
			accessKey: "",
			secretKey: "",
@@ -93,7 +93,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
		},
		// PUT wrong credentials
		{
-			method: "PUT",
+			method: http.MethodPut,
			bucketName: bucketName,
			accessKey: "abcd",
			secretKey: "abcd",
@@ -108,7 +108,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
		},
		// DELETE empty credentials
		{
-			method: "DELETE",
+			method: http.MethodDelete,
			bucketName: bucketName,
			accessKey: "",
			secretKey: "",
@@ -123,7 +123,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
		},
		// DELETE wrong credentials
		{
-			method: "DELETE",
+			method: http.MethodDelete,
			bucketName: bucketName,
			accessKey: "abcd",
			secretKey: "abcd",
@@ -168,7 +168,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
		// Test case - 1.
		// Filter contains more than (Prefix,Tag,And) rule
		{
-			method: "PUT",
+			method: http.MethodPut,
			bucketName: bucketName,
			accessKey: creds.AccessKey,
			secretKey: creds.SecretKey,
@@ -185,7 +185,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
		},
		// Date contains wrong format
		{
-			method: "PUT",
+			method: http.MethodPut,
			bucketName: bucketName,
			accessKey: creds.AccessKey,
			secretKey: creds.SecretKey,
@@ -201,7 +201,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
			shouldPass: false,
		},
		{
-			method: "PUT",
+			method: http.MethodPut,
			bucketName: bucketName,
			accessKey: creds.AccessKey,
			secretKey: creds.SecretKey,
@@ -212,7 +212,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
			shouldPass: true,
		},
		{
-			method: "GET",
+			method: http.MethodGet,
			accessKey: creds.AccessKey,
			secretKey: creds.SecretKey,
			bucketName: bucketName,
@@ -223,7 +223,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
			shouldPass: true,
		},
		{
-			method: "DELETE",
+			method: http.MethodDelete,
			accessKey: creds.AccessKey,
			secretKey: creds.SecretKey,
			bucketName: bucketName,
@@ -234,7 +234,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
			shouldPass: true,
		},
		{
-			method: "GET",
+			method: http.MethodGet,
			accessKey: creds.AccessKey,
			secretKey: creds.SecretKey,
			bucketName: bucketName,
@@ -17,7 +17,25 @@
package cmd

import (
	"context"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"runtime"
	"strings"
	"time"

	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	sse "github.com/minio/minio/pkg/bucket/encryption"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/s3select"
)

const (
@@ -31,7 +49,7 @@ type LifecycleSys struct{}
// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
	if globalIsGateway {
-		objAPI := newObjectLayerWithoutSafeModeFn()
+		objAPI := newObjectLayerFn()
		if objAPI == nil {
			return nil, errServerNotInitialized
		}
@@ -46,3 +64,644 @@ func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err er
func NewLifecycleSys() *LifecycleSys {
	return &LifecycleSys{}
}

type expiryTask struct {
	objInfo       ObjectInfo
	versionExpiry bool
}

type expiryState struct {
	expiryCh chan expiryTask
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
	select {
	case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
	default:
	}
}

var (
	globalExpiryState *expiryState
)

func newExpiryState() *expiryState {
	es := &expiryState{
		expiryCh: make(chan expiryTask, 10000),
	}
	go func() {
		<-GlobalContext.Done()
		close(es.expiryCh)
	}()
	return es
}

func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
	globalExpiryState = newExpiryState()
	go func() {
		for t := range globalExpiryState.expiryCh {
			applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
		}
	}()
}
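Note how queueExpiryTask above, and queueTransitionTask just below, push onto a buffered channel inside a select with an empty default case: producers never block, and when the 10000-slot queue is full the task is silently dropped (to be retried by a later lifecycle scan). A minimal standalone sketch of that pattern, using toy types rather than the patch's ObjectInfo:

package main

import "fmt"

type task struct{ id int }

func main() {
	queue := make(chan task, 2) // the patch sizes these queues at 10000

	enqueue := func(t task) bool {
		select {
		case queue <- t:
			return true
		default:
			// queue full: drop the task instead of blocking the caller
			return false
		}
	}

	for i := 0; i < 3; i++ {
		fmt.Println(enqueue(task{id: i})) // true, true, false
	}
}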
type transitionState struct {
	// add future metrics here
	transitionCh chan ObjectInfo
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
	select {
	case t.transitionCh <- oi:
	default:
	}
}

var (
	globalTransitionState      *transitionState
	globalTransitionConcurrent = runtime.GOMAXPROCS(0) / 2
)

func newTransitionState() *transitionState {
	// fix minimum concurrent transition to 1 for single CPU setup
	if globalTransitionConcurrent == 0 {
		globalTransitionConcurrent = 1
	}
	ts := &transitionState{
		transitionCh: make(chan ObjectInfo, 10000),
	}
	go func() {
		<-GlobalContext.Done()
		close(ts.transitionCh)
	}()
	return ts
}

// addWorker creates a new worker to process tasks
func (t *transitionState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
	// Add a new worker.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case oi, ok := <-t.transitionCh:
				if !ok {
					return
				}
				if err := transitionObject(ctx, objectAPI, oi); err != nil {
					logger.LogIf(ctx, err)
				}
			}
		}
	}()
}

func initBackgroundTransition(ctx context.Context, objectAPI ObjectLayer) {
	if globalTransitionState == nil {
		return
	}

	// Start with globalTransitionConcurrent.
	for i := 0; i < globalTransitionConcurrent; i++ {
		globalTransitionState.addWorker(ctx, objectAPI)
	}
}

func validateLifecycleTransition(ctx context.Context, bucket string, lfc *lifecycle.Lifecycle) error {
	for _, rule := range lfc.Rules {
		if rule.Transition.StorageClass != "" {
			sameTarget, destbucket, err := validateTransitionDestination(ctx, bucket, rule.Transition.StorageClass)
			if err != nil {
				return err
			}
			if sameTarget && destbucket == bucket {
				return fmt.Errorf("Transition destination cannot be the same as the source bucket")
			}
		}
	}
	return nil
}

// validateTransitionDestination returns error if transition destination bucket missing or not configured
// It also returns true if transition destination is same as this server.
func validateTransitionDestination(ctx context.Context, bucket string, targetLabel string) (bool, string, error) {
	tgt := globalBucketTargetSys.GetRemoteTargetWithLabel(ctx, bucket, targetLabel)
	if tgt == nil {
		return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
	}
	arn, err := madmin.ParseARN(tgt.Arn)
	if err != nil {
		return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
	}
	if arn.Type != madmin.ILMService {
		return false, "", BucketRemoteArnTypeInvalid{}
	}
	clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgt.Arn)
	if clnt == nil {
		return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
	}
	if found, _ := clnt.BucketExists(ctx, arn.Bucket); !found {
		return false, "", BucketRemoteDestinationNotFound{Bucket: arn.Bucket}
	}
	sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
	return sameTarget, arn.Bucket, nil
}

// transitionSC returns storage class label for this bucket
func transitionSC(ctx context.Context, bucket string) string {
	cfg, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
	if err != nil {
		return ""
	}
	for _, rule := range cfg.Rules {
		if rule.Status == Disabled {
			continue
		}
		if rule.Transition.StorageClass != "" {
			return rule.Transition.StorageClass
		}
	}
	return ""
}

// return true if ARN representing transition storage class is present in a active rule
// for the lifecycle configured on this bucket
func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, arnStr string) bool {
	tgtLabel := globalBucketTargetSys.GetRemoteLabelWithArn(ctx, bucket, arnStr)
	if tgtLabel == "" {
		return false
	}
	for _, rule := range lfc.Rules {
		if rule.Status == Disabled {
			continue
		}
		if rule.Transition.StorageClass != "" && rule.Transition.StorageClass == tgtLabel {
			return true
		}
	}
	return false
}

// set PutObjectOptions for PUT operation to transition data to target cluster
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
	meta := make(map[string]string)

	putOpts = miniogo.PutObjectOptions{
		UserMetadata:    meta,
		ContentType:     objInfo.ContentType,
		ContentEncoding: objInfo.ContentEncoding,
		StorageClass:    objInfo.StorageClass,
		Internal: miniogo.AdvancedPutOptions{
			SourceVersionID: objInfo.VersionID,
			SourceMTime:     objInfo.ModTime,
			SourceETag:      objInfo.ETag,
		},
	}

	if objInfo.UserTags != "" {
		tag, _ := tags.ParseObjectTags(objInfo.UserTags)
		if tag != nil {
			putOpts.UserTags = tag.ToMap()
		}
	}

	lkMap := caseInsensitiveMap(objInfo.UserDefined)
	if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
		putOpts.ContentLanguage = lang
	}
	if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
		putOpts.ContentDisposition = disp
	}
	if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
		putOpts.CacheControl = cc
	}
	if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
		rmode := miniogo.RetentionMode(mode)
		putOpts.Mode = rmode
	}
	if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
		rdate, err := time.Parse(time.RFC3339, retainDateStr)
		if err != nil {
			return putOpts, err
		}
		putOpts.RetainUntilDate = rdate
	}
	if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
		putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
	}

	return putOpts, nil
}
// handle deletes of transitioned objects or object versions when one of the following is true:
// 1. temporarily restored copies of objects (restored with the PostRestoreObject API) expired.
// 2. life cycle expiry date is met on the object.
// 3. Object is removed through DELETE api call
func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, restoredObject, isDeleteTierOnly bool) error {
	if lcOpts.TransitionStatus == "" && !isDeleteTierOnly {
		return nil
	}
	lc, err := globalLifecycleSys.Get(bucket)
	if err != nil {
		return err
	}
	arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lcOpts)
	if arn == nil {
		return fmt.Errorf("remote target not configured")
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
	if tgt == nil {
		return fmt.Errorf("remote target not configured")
	}

	var opts ObjectOptions
	opts.Versioned = globalBucketVersioningSys.Enabled(bucket)
	opts.VersionID = lcOpts.VersionID
	if restoredObject {
		// delete locally restored copy of object or object version
		// from the source, while leaving metadata behind. The data on
		// transitioned tier lies untouched and still accessible
		opts.TransitionStatus = lcOpts.TransitionStatus
		_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
		return err
	}

	// When an object is past expiry, delete the data from transitioned tier and
	// metadata from source
	if err := tgt.RemoveObject(context.Background(), arn.Bucket, object, miniogo.RemoveObjectOptions{VersionID: lcOpts.VersionID}); err != nil {
		logger.LogIf(ctx, err)
	}

	if isDeleteTierOnly {
		return nil
	}

	objInfo, err := objectAPI.DeleteObject(ctx, bucket, object, opts)
	if err != nil {
		return err
	}
	eventName := event.ObjectRemovedDelete
	if lcOpts.DeleteMarker {
		eventName = event.ObjectRemovedDeleteMarkerCreated
	}
	// Notify object deleted event.
	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: bucket,
		Object:     objInfo,
		Host:       "Internal: [ILM-EXPIRY]",
	})

	// should never reach here
	return nil
}

// transition object to target specified by the transition ARN. When an object is transitioned to another
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo ObjectInfo) error {
	lc, err := globalLifecycleSys.Get(objInfo.Bucket)
	if err != nil {
		return err
	}
	lcOpts := lifecycle.ObjectOpts{
		Name:     objInfo.Name,
		UserTags: objInfo.UserTags,
	}
	arn := getLifecycleTransitionTargetArn(ctx, lc, objInfo.Bucket, lcOpts)
	if arn == nil {
		return fmt.Errorf("remote target not configured")
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
	if tgt == nil {
		return fmt.Errorf("remote target not configured")
	}

	gr, err := objectAPI.GetObjectNInfo(ctx, objInfo.Bucket, objInfo.Name, nil, http.Header{}, readLock, ObjectOptions{
		VersionID:        objInfo.VersionID,
		TransitionStatus: lifecycle.TransitionPending,
	})
	if err != nil {
		return err
	}
	oi := gr.ObjInfo
	if oi.TransitionStatus == lifecycle.TransitionComplete {
		gr.Close()
		return nil
	}

	putOpts, err := putTransitionOpts(oi)
	if err != nil {
		gr.Close()
		return err
	}
	if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, putOpts); err != nil {
		gr.Close()
		return err
	}
	gr.Close()

	var opts ObjectOptions
	opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
	opts.VersionID = oi.VersionID
	opts.TransitionStatus = lifecycle.TransitionComplete
	eventName := event.ObjectTransitionComplete

	objInfo, err = objectAPI.DeleteObject(ctx, oi.Bucket, oi.Name, opts)
	if err != nil {
		eventName = event.ObjectTransitionFailed
	}

	// Notify object deleted event.
	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: objInfo.Bucket,
		Object:     objInfo,
		Host:       "Internal: [ILM-Transition]",
	})

	return err
}

// getLifecycleTransitionTargetArn returns transition ARN for storage class specified in the config.
func getLifecycleTransitionTargetArn(ctx context.Context, lc *lifecycle.Lifecycle, bucket string, obj lifecycle.ObjectOpts) *madmin.ARN {
	for _, rule := range lc.FilterActionableRules(obj) {
		if rule.Transition.StorageClass != "" {
			return globalBucketTargetSys.GetRemoteArnWithLabel(ctx, bucket, rule.Transition.StorageClass)
		}
	}
	return nil
}
// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
	var lc *lifecycle.Lifecycle
	lc, err = globalLifecycleSys.Get(bucket)
	if err != nil {
		return nil, err
	}

	arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lifecycle.ObjectOpts{
		Name:         object,
		UserTags:     oi.UserTags,
		ModTime:      oi.ModTime,
		VersionID:    oi.VersionID,
		DeleteMarker: oi.DeleteMarker,
		IsLatest:     oi.IsLatest,
	})
	if arn == nil {
		return nil, fmt.Errorf("remote target not configured")
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
	if tgt == nil {
		return nil, fmt.Errorf("remote target not configured")
	}
	fn, off, length, err := NewGetObjectReader(rs, oi, opts)
	if err != nil {
		return nil, ErrorRespToObjectError(err, bucket, object)
	}
	gopts := miniogo.GetObjectOptions{VersionID: opts.VersionID}

	// get correct offsets for encrypted object
	if off >= 0 && length >= 0 {
		if err := gopts.SetRange(off, off+length-1); err != nil {
			return nil, ErrorRespToObjectError(err, bucket, object)
		}
	}

	reader, err := tgt.GetObject(ctx, arn.Bucket, object, gopts)
	if err != nil {
		return nil, err
	}
	closeReader := func() { reader.Close() }

	return fn(reader, h, opts.CheckPrecondFn, closeReader)
}

// RestoreRequestType represents type of restore.
type RestoreRequestType string

const (
	// SelectRestoreRequest specifies select request. This is the only valid value
	SelectRestoreRequest RestoreRequestType = "SELECT"
)

// Encryption specifies encryption setting on restored bucket
type Encryption struct {
	EncryptionType sse.SSEAlgorithm `xml:"EncryptionType"`
	KMSContext     string           `xml:"KMSContext,omitempty"`
	KMSKeyID       string           `xml:"KMSKeyId,omitempty"`
}

// MetadataEntry denotes name and value.
type MetadataEntry struct {
	Name  string `xml:"Name"`
	Value string `xml:"Value"`
}

// S3Location specifies s3 location that receives result of a restore object request
type S3Location struct {
	BucketName   string          `xml:"BucketName,omitempty"`
	Encryption   Encryption      `xml:"Encryption,omitempty"`
	Prefix       string          `xml:"Prefix,omitempty"`
	StorageClass string          `xml:"StorageClass,omitempty"`
	Tagging      *tags.Tags      `xml:"Tagging,omitempty"`
	UserMetadata []MetadataEntry `xml:"UserMetadata"`
}

// OutputLocation specifies bucket where object needs to be restored
type OutputLocation struct {
	S3 S3Location `xml:"S3,omitempty"`
}

// IsEmpty returns true if output location not specified.
func (o *OutputLocation) IsEmpty() bool {
	return o.S3.BucketName == ""
}

// SelectParameters specifies sql select parameters
type SelectParameters struct {
	s3select.S3Select
}

// IsEmpty returns true if no select parameters set
func (sp *SelectParameters) IsEmpty() bool {
	return sp == nil || sp.S3Select == s3select.S3Select{}
}

var (
	selectParamsXMLName = "SelectParameters"
)

// UnmarshalXML - decodes XML data.
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Essentially the same as S3Select barring the xml name.
	if start.Name.Local == selectParamsXMLName {
		start.Name = xml.Name{Space: "", Local: "SelectRequest"}
	}
	return sp.S3Select.UnmarshalXML(d, start)
}

// RestoreObjectRequest - xml to restore a transitioned object
type RestoreObjectRequest struct {
	XMLName          xml.Name           `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest" json:"-"`
	Days             int                `xml:"Days,omitempty"`
	Type             RestoreRequestType `xml:"Type,omitempty"`
	Tier             string             `xml:"Tier,-"`
	Description      string             `xml:"Description,omitempty"`
	SelectParameters *SelectParameters  `xml:"SelectParameters,omitempty"`
	OutputLocation   OutputLocation     `xml:"OutputLocation,omitempty"`
}

// Maximum 2MiB size per restore object request.
const maxRestoreObjectRequestSize = 2 << 20

// parseRestoreRequest parses RestoreObjectRequest from xml
func parseRestoreRequest(reader io.Reader) (*RestoreObjectRequest, error) {
	req := RestoreObjectRequest{}
	if err := xml.NewDecoder(io.LimitReader(reader, maxRestoreObjectRequestSize)).Decode(&req); err != nil {
		return nil, err
	}
	return &req, nil
}
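parseRestoreRequest wraps the request body in io.LimitReader so the XML decoder can never read more than 2 MiB, no matter what the client sends. The following standalone sketch shows the same decode-with-a-cap idiom; the restoreRequest struct here is a trimmed, illustrative stand-in for the patch's RestoreObjectRequest:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// Trimmed stand-in for RestoreObjectRequest; field set is illustrative only.
type restoreRequest struct {
	XMLName xml.Name `xml:"RestoreRequest"`
	Days    int      `xml:"Days,omitempty"`
	Type    string   `xml:"Type,omitempty"`
}

func main() {
	const maxSize = 2 << 20 // same 2 MiB cap as maxRestoreObjectRequestSize
	body := `<RestoreRequest><Days>7</Days></RestoreRequest>`
	var req restoreRequest
	// LimitReader guarantees the decoder reads at most maxSize bytes,
	// regardless of how large the request body actually is.
	err := xml.NewDecoder(io.LimitReader(strings.NewReader(body), maxSize)).Decode(&req)
	fmt.Println(req.Days, err) // 7 <nil>
}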
// validate a RestoreObjectRequest as per AWS S3 spec https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer) error {
	if r.Type != SelectRestoreRequest && !r.SelectParameters.IsEmpty() {
		return fmt.Errorf("Select parameters can only be specified with SELECT request type")
	}
	if r.Type == SelectRestoreRequest && r.SelectParameters.IsEmpty() {
		return fmt.Errorf("SELECT restore request requires select parameters to be specified")
	}

	if r.Type != SelectRestoreRequest && !r.OutputLocation.IsEmpty() {
		return fmt.Errorf("OutputLocation required only for SELECT request type")
	}
	if r.Type == SelectRestoreRequest && r.OutputLocation.IsEmpty() {
		return fmt.Errorf("OutputLocation required for SELECT requests")
	}

	if r.Days != 0 && r.Type == SelectRestoreRequest {
		return fmt.Errorf("Days cannot be specified with SELECT restore request")
	}
	if r.Days == 0 && r.Type != SelectRestoreRequest {
		return fmt.Errorf("restoration days should be at least 1")
	}
	// Check if bucket exists.
	if !r.OutputLocation.IsEmpty() {
		if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName); err != nil {
			return err
		}
		if r.OutputLocation.S3.Prefix == "" {
			return fmt.Errorf("Prefix is a required parameter in OutputLocation")
		}
		if r.OutputLocation.S3.Encryption.EncryptionType != xhttp.AmzEncryptionAES {
			return NotImplemented{}
		}
	}
	return nil
}

// set ObjectOptions for PUT call to restore temporary copy of transitioned data
func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo ObjectInfo) (putOpts ObjectOptions) {
	meta := make(map[string]string)
	sc := rreq.OutputLocation.S3.StorageClass
	if sc == "" {
		sc = objInfo.StorageClass
	}
	meta[strings.ToLower(xhttp.AmzStorageClass)] = sc

	if rreq.Type == SelectRestoreRequest {
		for _, v := range rreq.OutputLocation.S3.UserMetadata {
			if !strings.HasPrefix("x-amz-meta", strings.ToLower(v.Name)) {
				meta["x-amz-meta-"+v.Name] = v.Value
				continue
			}
			meta[v.Name] = v.Value
		}
		meta[xhttp.AmzObjectTagging] = rreq.OutputLocation.S3.Tagging.String()
		if rreq.OutputLocation.S3.Encryption.EncryptionType != "" {
			meta[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
		}
		return ObjectOptions{
			Versioned:        globalBucketVersioningSys.Enabled(bucket),
			VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
			UserDefined:      meta,
		}
	}
	for k, v := range objInfo.UserDefined {
		meta[k] = v
	}
	meta[xhttp.AmzObjectTagging] = objInfo.UserTags

	return ObjectOptions{
		Versioned:        globalBucketVersioningSys.Enabled(bucket),
		VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
		UserDefined:      meta,
		VersionID:        objInfo.VersionID,
		MTime:            objInfo.ModTime,
		Expires:          objInfo.Expires,
	}
}

var (
	errRestoreHDRMissing   = fmt.Errorf("x-amz-restore header not found")
	errRestoreHDRMalformed = fmt.Errorf("x-amz-restore header malformed")
)

// parse x-amz-restore header from user metadata to get the status of ongoing request and expiry of restoration
// if any. This header value is of format: ongoing-request=true|false, expires=time
func parseRestoreHeaderFromMeta(meta map[string]string) (ongoing bool, expiry time.Time, err error) {
	restoreHdr, ok := meta[xhttp.AmzRestore]
	if !ok {
		return ongoing, expiry, errRestoreHDRMissing
	}
	rslc := strings.SplitN(restoreHdr, ",", 2)
	if len(rslc) != 2 {
		return ongoing, expiry, errRestoreHDRMalformed
	}
	rstatusSlc := strings.SplitN(rslc[0], "=", 2)
	if len(rstatusSlc) != 2 {
		return ongoing, expiry, errRestoreHDRMalformed
	}
	rExpSlc := strings.SplitN(rslc[1], "=", 2)
	if len(rExpSlc) != 2 {
		return ongoing, expiry, errRestoreHDRMalformed
	}

	expiry, err = time.Parse(http.TimeFormat, rExpSlc[1])
	if err != nil {
		return
	}
	return rstatusSlc[1] == "true", expiry, nil
}
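parseRestoreHeaderFromMeta splits the header value once on ',' and then on '=' within each half; splitting only once matters because the HTTP date format itself contains a comma ("Mon, 02 Jan ..."). A standalone round-trip sketch, pairing the parse above with the format string that restoreTransitionedObject (below) writes:

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	expiry := time.Now().UTC().Add(24 * time.Hour).Truncate(time.Second)
	// The value restoreTransitionedObject stores in user metadata:
	hdr := fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, expiry.Format(http.TimeFormat))

	// Recover it the same way parseRestoreHeaderFromMeta does:
	// split once on ',' (the date's own comma survives), then on '='.
	parts := strings.SplitN(hdr, ",", 2)
	status := strings.SplitN(parts[0], "=", 2)
	expSlc := strings.SplitN(parts[1], "=", 2)
	when, err := time.Parse(http.TimeFormat, expSlc[1])

	fmt.Println(status[1] == "true", when.Equal(expiry), err) // false true <nil>
}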
// restoreTransitionedObject is similar to PostObjectRestore from AWS GLACIER
// storage class. When PostObjectRestore API is called, a temporary copy of the object
// is restored locally to the bucket on source cluster until the restore expiry date.
// The copy that was transitioned continues to reside in the transitioned tier.
func restoreTransitionedObject(ctx context.Context, bucket, object string, objAPI ObjectLayer, objInfo ObjectInfo, rreq *RestoreObjectRequest, restoreExpiry time.Time) error {
	var rs *HTTPRangeSpec
	gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, objInfo, ObjectOptions{
		VersionID: objInfo.VersionID})
	if err != nil {
		return err
	}
	defer gr.Close()
	hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
	if err != nil {
		return err
	}
	pReader := NewPutObjReader(hashReader)
	opts := putRestoreOpts(bucket, object, rreq, objInfo)
	opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
	if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {
		return err
	}

	return nil
}
@@ -18,18 +18,35 @@ package cmd

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"

	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/sync/errgroup"
)

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
	g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
	_, cancel := g.WithCancelOnError(ctx)
	defer cancel()
	for index := range objects {
		index := index
		g.Go(func() error {
			size, err := objects[index].GetActualSize()
			if err == nil {
				objects[index].Size = size
			}
			objects[index].ETag = objects[index].GetActualETag(nil)
			return nil
		}, index)
	}
	g.WaitErr()
}
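concurrentDecryptETag fans the per-object ETag/size fix-up out across goroutines while capping parallelism at 500 via MinIO's internal errgroup package. As an illustration only (swapping in a stdlib-only semaphore channel for the internal errgroup), the same bounded-concurrency shape looks like this:

package main

import (
	"fmt"
	"sync"
)

func main() {
	objects := make([]int, 1000)
	sem := make(chan struct{}, 4) // concurrency cap; the handler code uses 500
	var wg sync.WaitGroup
	for i := range objects {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot before spawning
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			objects[i] = i * 2       // stand-in for the per-object ETag/size work
		}(i)
	}
	wg.Wait()
	fmt.Println(objects[999]) // 1998
}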
// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args do not meet the required conditions.
// Special conditions required by MinIO server are as below
@@ -58,7 +75,7 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectVersions")

-	defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
@@ -69,7 +86,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
		return
	}

-	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
+	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}
@@ -100,16 +117,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
		return
	}

-	for i := range listObjectVersionsInfo.Objects {
-		if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) {
-			listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false)
-		}
-		listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)

	response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

@@ -120,7 +128,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
-// of the objects in a bucket. You can use the request parame<ters as selection
+// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API to be used for application development.
@@ -128,7 +136,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectsV2M")

-	defer logger.AuditLog(w, r, "ListObjectsV2M", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
@@ -160,13 +168,6 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
		return
	}

-	// Analyze continuation token and route the request accordingly
-	var success bool
-	token, success = proxyRequestByToken(ctx, w, r, token)
-	if success {
-		return
-	}
-
	listObjectsV2 := objectAPI.ListObjectsV2

	// Inititate a list objects operation based on the input params.
@@ -178,22 +179,10 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
		return
	}

-	for i := range listObjectsV2Info.Objects {
-		if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
-			listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
-		}
-		listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectsV2Info.Objects)

	// The next continuation token has id@node_index format to optimize paginated listing
	nextContinuationToken := listObjectsV2Info.NextContinuationToken
	if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
		nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
	}

	response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
@@ -214,7 +203,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectsV2")

-	defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
@@ -246,13 +235,6 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
		return
	}

-	// Analyze continuation token and route the request accordingly
-	var success bool
-	token, success = proxyRequestByToken(ctx, w, r, token)
-	if success {
-		return
-	}
-
	listObjectsV2 := objectAPI.ListObjectsV2

	// Inititate a list objects operation based on the input params.
@@ -264,24 +246,9 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
		return
	}

-	for i := range listObjectsV2Info.Objects {
-		if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
-			listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
-		}
-		listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectsV2Info.Objects)

-	// The next continuation token has id@node_index format to optimize paginated listing
-	nextContinuationToken := listObjectsV2Info.NextContinuationToken
-	if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
-		nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
-	}
-
-	response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
+	response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
		maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)

@@ -289,18 +256,6 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
	writeSuccessResponseXML(w, encodeResponse(response))
}

-func getLocalNodeIndex() int {
-	if len(globalProxyEndpoints) == 0 {
-		return -1
-	}
-	for i, ep := range globalProxyEndpoints {
-		if ep.IsLocal {
-			return i
-		}
-	}
-	return -1
-}
-
func parseRequestToken(token string) (subToken string, nodeIndex int) {
	if token == "" {
		return token, -1
@@ -339,10 +294,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
	return proxyRequest(ctx, w, r, ep)
}

-func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) (success bool) {
-	return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(bucket, len(globalProxyEndpoints)))
-}
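The continuation tokens handed back by the V2M handler carry an "id@node_index" suffix so a follow-up page request can be routed to the node that produced the listing. parseRequestToken's body is truncated in the hunk above, so the following is a hypothetical sketch of the same split, assuming the "@" separator shown in the fmt.Sprintf call; splitToken and its behavior are illustrative, not the patch's code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitToken separates the opaque sub-token from the trailing node index.
// Hypothetical reimplementation for illustration only.
func splitToken(token string) (subToken string, nodeIndex int) {
	i := strings.LastIndex(token, "@")
	if i < 0 {
		return token, -1
	}
	n, err := strconv.Atoi(token[i+1:])
	if err != nil {
		return token, -1
	}
	return token[:i], n
}

func main() {
	sub, node := splitToken("opaque-continuation-token@2")
	fmt.Println(sub, node) // opaque-continuation-token 2
}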
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
@@ -352,7 +303,7 @@ func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectsV1")

-	defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
@@ -381,10 +332,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
		return
	}

-	if proxyRequestByBucket(ctx, w, r, bucket) {
-		return
-	}
-
	listObjects := objectAPI.ListObjects

	// Inititate a list objects operation based on the input params.
@@ -396,16 +343,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
		return
	}

-	for i := range listObjectsInfo.Objects {
-		if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
-			listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
-		}
-		listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectsInfo.Objects)

	response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
|
||||
@@ -23,11 +23,14 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/tags"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
"github.com/minio/minio/pkg/bucket/versioning"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
@@ -47,6 +50,7 @@ func (sys *BucketMetadataSys) Remove(bucket string) {
|
||||
}
|
||||
sys.Lock()
|
||||
delete(sys.metadataMap, bucket)
|
||||
globalBucketMonitor.DeleteBucket(bucket)
|
||||
sys.Unlock()
|
||||
}
|
||||
|
||||
@@ -70,7 +74,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
|
||||
// Update update bucket metadata for the specified config file.
|
||||
// The configData data should not be modified after being sent here.
|
||||
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
@@ -79,7 +83,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
// This code is needed only for gateway implementations.
|
||||
switch configFile {
|
||||
case bucketSSEConfig:
|
||||
if globalGatewayName == "nas" {
|
||||
if globalGatewayName == NASBackendGateway {
|
||||
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -88,7 +92,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
return meta.Save(GlobalContext, objAPI)
|
||||
}
|
||||
case bucketLifecycleConfig:
|
||||
if globalGatewayName == "nas" {
|
||||
if globalGatewayName == NASBackendGateway {
|
||||
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -97,7 +101,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
return meta.Save(GlobalContext, objAPI)
|
||||
}
|
||||
case bucketTaggingConfig:
|
||||
if globalGatewayName == "nas" {
|
||||
if globalGatewayName == NASBackendGateway {
|
||||
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -106,7 +110,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
return meta.Save(GlobalContext, objAPI)
|
||||
}
|
||||
case bucketNotificationConfig:
|
||||
if globalGatewayName == "nas" {
|
||||
if globalGatewayName == NASBackendGateway {
|
||||
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -147,12 +151,28 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
meta.EncryptionConfigXML = configData
|
||||
case bucketTaggingConfig:
|
||||
meta.TaggingConfigXML = configData
|
||||
case objectLockConfig:
|
||||
meta.ObjectLockConfigXML = configData
|
||||
case bucketVersioningConfig:
|
||||
meta.VersioningConfigXML = configData
|
||||
case bucketQuotaConfigFile:
|
||||
meta.QuotaConfigJSON = configData
|
||||
case objectLockConfig:
|
||||
if !globalIsErasure && !globalIsDistErasure {
|
||||
return NotImplemented{}
|
||||
}
|
||||
meta.ObjectLockConfigXML = configData
|
||||
case bucketVersioningConfig:
|
||||
if !globalIsErasure && !globalIsDistErasure {
|
||||
return NotImplemented{}
|
||||
}
|
||||
meta.VersioningConfigXML = configData
|
||||
case bucketReplicationConfig:
|
||||
if !globalIsErasure && !globalIsDistErasure {
|
||||
return NotImplemented{}
|
||||
}
|
||||
meta.ReplicationConfigXML = configData
|
||||
case bucketTargetsFile:
|
||||
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, crypto.Context{bucket: meta.Name, bucketTargetsFile: bucketTargetsFile})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error encrypting bucket target metadata %w", err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
|
||||
}
|
||||
@@ -171,6 +191,12 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
// If no metadata exists errConfigNotFound is returned and a new metadata is returned.
|
||||
// Only a shallow copy is returned, so referenced data should not be modified,
|
||||
// but can be replaced atomically.
|
||||
//
|
||||
// This function should only be used with
|
||||
// - GetBucketInfo
|
||||
// - ListBuckets
|
||||
// For all other bucket specific metadata, use the relevant
|
||||
// calls implemented specifically for each of those features.
|
||||
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
|
||||
if globalIsGateway || bucket == minioMetaBucket {
|
||||
return newBucketMetadata(bucket), errConfigNotFound
|
||||
@@ -248,9 +274,9 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
|
||||
// GetNotificationConfig returns configured notification config
|
||||
// The returned object may not be modified.
|
||||
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
|
||||
if globalIsGateway && globalGatewayName == "nas" {
|
||||
if globalIsGateway && globalGatewayName == NASBackendGateway {
|
||||
// Only needed in case of NAS gateway.
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
@@ -288,7 +314,7 @@ func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEC
|
||||
// The returned object may not be modified.
|
||||
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
@@ -318,10 +344,54 @@ func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota
     return meta.quotaConfig, nil
 }

-// GetConfig returns the current bucket metadata
+// GetReplicationConfig returns configured bucket replication config
 // The returned object may not be modified.
+func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
+    meta, err := sys.GetConfig(bucket)
+    if err != nil {
+        if errors.Is(err, errConfigNotFound) {
+            return nil, BucketReplicationConfigNotFound{Bucket: bucket}
+        }
+        return nil, err
+    }
+
+    if meta.replicationConfig == nil {
+        return nil, BucketReplicationConfigNotFound{Bucket: bucket}
+    }
+    return meta.replicationConfig, nil
+}
+
+// GetBucketTargetsConfig returns configured bucket targets for this bucket
+// The returned object may not be modified.
+func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
+    meta, err := sys.GetConfig(bucket)
+    if err != nil {
+        return nil, err
+    }
+    if meta.bucketTargetConfig == nil {
+        return nil, BucketRemoteTargetNotFound{Bucket: bucket}
+    }
+    return meta.bucketTargetConfig, nil
+}
+
+// GetBucketTarget returns the target for the bucket and arn.
+func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin.BucketTarget, error) {
+    targets, err := sys.GetBucketTargetsConfig(bucket)
+    if err != nil {
+        return madmin.BucketTarget{}, err
+    }
+    for _, t := range targets.Targets {
+        if t.Arn == arn {
+            return t, nil
+        }
+    }
+    return madmin.BucketTarget{}, errConfigNotFound
+}
+
+// GetConfig returns a specific configuration from the bucket metadata.
+// The returned object may not be modified.
 func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
-    objAPI := newObjectLayerWithoutSafeModeFn()
+    objAPI := newObjectLayerFn()
     if objAPI == nil {
         return newBucketMetadata(bucket), errServerNotInitialized
     }
@@ -362,16 +432,21 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
         return nil
     }

-    // Load PolicySys once during boot.
-    return sys.load(ctx, buckets, objAPI)
+    // Load bucket metadata sys in background
+    go sys.load(ctx, buckets, objAPI)
+    return nil
 }

 // concurrently load bucket metadata to speed up loading bucket metadata.
-func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
+func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
     g := errgroup.WithNErrs(len(buckets))
     for index := range buckets {
         index := index
         g.Go(func() error {
+            _, _ = objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
+                // Ensure heal opts for bucket metadata be deep healed all the time.
+                ScanMode: madmin.HealDeepScan,
+            })
             meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
             if err != nil {
                 return err
@@ -384,26 +459,33 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
     }
     for _, err := range g.Wait() {
         if err != nil {
-            return err
+            logger.LogIf(ctx, err)
         }
     }
-    return nil
 }

 // Loads bucket metadata for all buckets into BucketMetadataSys.
-func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
+func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
     count := 100 // load 100 bucket metadata at a time.
     for {
         if len(buckets) < count {
-            return sys.concurrentLoad(ctx, buckets, objAPI)
-        }
-        if err := sys.concurrentLoad(ctx, buckets[:count], objAPI); err != nil {
-            return err
+            sys.concurrentLoad(ctx, buckets, objAPI)
+            return
         }
+        sys.concurrentLoad(ctx, buckets[:count], objAPI)
         buckets = buckets[count:]
     }
 }

+// Reset the state of the BucketMetadataSys.
+func (sys *BucketMetadataSys) Reset() {
+    sys.Lock()
+    for k := range sys.metadataMap {
+        delete(sys.metadataMap, k)
+    }
+    sys.Unlock()
+}
+
 // NewBucketMetadataSys - creates new policy system.
 func NewBucketMetadataSys() *BucketMetadataSys {
     return &BucketMetadataSys{
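load above walks the bucket list in fixed slices of 100 and hands each slice to concurrentLoad, which fans out one goroutine per bucket and now only logs per-bucket failures instead of aborting startup. A self-contained sketch of the same slice-then-fan-out shape, using only the standard library (names invented for illustration):

package main

import (
    "fmt"
    "sync"
)

// loadOne stands in for loadBucketMetadata; failures are logged, not fatal.
func loadOne(bucket string) error {
    if bucket == "bad" {
        return fmt.Errorf("load %s: corrupt metadata", bucket)
    }
    return nil
}

// loadAll processes buckets in batches of `batch`, with one goroutine per
// bucket inside each batch, mirroring BucketMetadataSys.load/concurrentLoad.
func loadAll(buckets []string, batch int) {
    for len(buckets) > 0 {
        n := batch
        if len(buckets) < n {
            n = len(buckets)
        }
        var wg sync.WaitGroup
        for _, b := range buckets[:n] {
            wg.Add(1)
            go func(b string) {
                defer wg.Done()
                if err := loadOne(b); err != nil {
                    fmt.Println("warn:", err) // log and continue
                }
            }(b)
        }
        wg.Wait()
        buckets = buckets[n:]
    }
}

func main() {
    loadAll([]string{"a", "bad", "c", "d", "e"}, 2)
    fmt.Println("startup continues regardless of individual failures")
}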
@@ -19,22 +19,27 @@ package cmd
 import (
     "bytes"
     "context"
     "crypto/rand"
     "encoding/binary"
     "encoding/json"
     "encoding/xml"
     "errors"
     "fmt"
     "path"
     "time"

-    "github.com/minio/minio-go/v6/pkg/tags"
+    "github.com/minio/minio-go/v7/pkg/tags"
     "github.com/minio/minio/cmd/crypto"
     "github.com/minio/minio/cmd/logger"
     bucketsse "github.com/minio/minio/pkg/bucket/encryption"
     "github.com/minio/minio/pkg/bucket/lifecycle"
     objectlock "github.com/minio/minio/pkg/bucket/object/lock"
     "github.com/minio/minio/pkg/bucket/policy"
     "github.com/minio/minio/pkg/bucket/replication"
     "github.com/minio/minio/pkg/bucket/versioning"
     "github.com/minio/minio/pkg/event"
     "github.com/minio/minio/pkg/madmin"
     "github.com/minio/sio"
 )

 const (
@@ -59,27 +64,33 @@ var (
 // bucketMetadataFormat refers to the format.
 // bucketMetadataVersion can be used to track a rolling upgrade of a field.
 type BucketMetadata struct {
-    Name                  string
-    Created               time.Time
-    LockEnabled           bool // legacy not used anymore.
-    PolicyConfigJSON      []byte
-    NotificationConfigXML []byte
-    LifecycleConfigXML    []byte
-    ObjectLockConfigXML   []byte
-    VersioningConfigXML   []byte
-    EncryptionConfigXML   []byte
-    TaggingConfigXML      []byte
-    QuotaConfigJSON       []byte
+    Name                        string
+    Created                     time.Time
+    LockEnabled                 bool // legacy not used anymore.
+    PolicyConfigJSON            []byte
+    NotificationConfigXML       []byte
+    LifecycleConfigXML          []byte
+    ObjectLockConfigXML         []byte
+    VersioningConfigXML         []byte
+    EncryptionConfigXML         []byte
+    TaggingConfigXML            []byte
+    QuotaConfigJSON             []byte
+    ReplicationConfigXML        []byte
+    BucketTargetsConfigJSON     []byte
+    BucketTargetsConfigMetaJSON []byte

     // Unexported fields. Must be updated atomically.
-    policyConfig       *policy.Policy
-    notificationConfig *event.Config
-    lifecycleConfig    *lifecycle.Lifecycle
-    objectLockConfig   *objectlock.Config
-    versioningConfig   *versioning.Versioning
-    sseConfig          *bucketsse.BucketSSEConfig
-    taggingConfig      *tags.Tags
-    quotaConfig        *madmin.BucketQuota
+    policyConfig           *policy.Policy
+    notificationConfig     *event.Config
+    lifecycleConfig        *lifecycle.Lifecycle
+    objectLockConfig       *objectlock.Config
+    versioningConfig       *versioning.Versioning
+    sseConfig              *bucketsse.BucketSSEConfig
+    taggingConfig          *tags.Tags
+    quotaConfig            *madmin.BucketQuota
+    replicationConfig      *replication.Config
+    bucketTargetConfig     *madmin.BucketTargets
+    bucketTargetConfigMeta map[string]string
 }

 // newBucketMetadata creates BucketMetadata with the supplied name and Created to Now.
@@ -94,12 +105,18 @@ func newBucketMetadata(name string) BucketMetadata {
         versioningConfig: &versioning.Versioning{
             XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
         },
+        bucketTargetConfig:     &madmin.BucketTargets{},
+        bucketTargetConfigMeta: make(map[string]string),
     }
 }

 // Load - loads the metadata of bucket by name from ObjectLayer api.
 // If an error is returned the returned metadata will be default initialized.
 func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
+    if name == "" {
+        logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
+        return errors.New("bucket name cannot be empty")
+    }
     configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
     data, err := readConfig(ctx, api, configFile)
     if err != nil {
@@ -119,26 +136,26 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
     default:
         return fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
     }

     // OK, parse data.
     _, err = b.UnmarshalMsg(data[4:])
+    b.Name = name // in-case parsing failed for some reason, make sure bucket name is not empty.
     return err
 }

 // loadBucketMetadata loads and migrates to bucket metadata.
 func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
     b := newBucketMetadata(bucket)
-    err := b.Load(ctx, objectAPI, bucket)
-    if err == nil {
-        return b, b.convertLegacyConfigs(ctx, objectAPI)
-    }
-
-    if err != errConfigNotFound {
+    err := b.Load(ctx, objectAPI, b.Name)
+    if err != nil && !errors.Is(err, errConfigNotFound) {
         return b, err
     }

     // Old bucket without bucket metadata. Hence we migrate existing settings.
-    return b, b.convertLegacyConfigs(ctx, objectAPI)
+    if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil {
+        return b, err
+    }
+    // migrate unencrypted remote targets
+    return b, b.migrateTargetConfig(ctx, objectAPI)
 }

 // parseAllConfigs will parse all configs and populate the private fields.
@@ -213,6 +230,23 @@ func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLa
         }
     }

+    if len(b.ReplicationConfigXML) != 0 {
+        b.replicationConfig, err = replication.ParseConfig(bytes.NewReader(b.ReplicationConfigXML))
+        if err != nil {
+            return err
+        }
+    } else {
+        b.replicationConfig = nil
+    }
+
+    if len(b.BucketTargetsConfigJSON) != 0 {
+        b.bucketTargetConfig, err = parseBucketTargetConfig(b.Name, b.BucketTargetsConfigJSON, b.BucketTargetsConfigMetaJSON)
+        if err != nil {
+            return err
+        }
+    } else {
+        b.bucketTargetConfig = &madmin.BucketTargets{}
+    }
     return nil
 }
@@ -225,6 +259,8 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
         bucketQuotaConfigFile,
         bucketSSEConfig,
         bucketTaggingConfig,
+        bucketReplicationConfig,
+        bucketTargetsFile,
         objectLockConfig,
     }
@@ -242,6 +278,12 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj

         configData, err := readConfig(ctx, objectAPI, configFile)
         if err != nil {
+            switch err.(type) {
+            case ObjectExistsAsDirectory:
+                // in FS mode it possible that we have actual
+                // files in this folder with `.minio.sys/buckets/bucket/configFile`
+                continue
+            }
             if errors.Is(err, errConfigNotFound) {
                 // legacy file config not found, proceed to look for new metadata.
                 continue
@@ -281,6 +323,10 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
             b.VersioningConfigXML = enabledBucketVersioningConfig
         case bucketQuotaConfigFile:
             b.QuotaConfigJSON = configData
+        case bucketReplicationConfig:
+            b.ReplicationConfigXML = configData
+        case bucketTargetsFile:
+            b.BucketTargetsConfigJSON = configData
         }
     }
@@ -322,7 +368,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {

 // deleteBucketMetadata deletes bucket metadata
 // If config does not exist no error is returned.
-func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
+func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
     metadataFiles := []string{
         dataUsageCacheName,
         bucketMetadataFile,
@@ -335,3 +381,76 @@ func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) e
     }
     return nil
 }
+
+// migrate config for remote targets by encrypting data if currently unencrypted and kms is configured.
+func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI ObjectLayer) error {
+    var err error
+    // early return if no targets or already encrypted
+    if len(b.BucketTargetsConfigJSON) == 0 || GlobalKMS == nil || len(b.BucketTargetsConfigMetaJSON) != 0 {
+        return nil
+    }
+
+    encBytes, metaBytes, err := encryptBucketMetadata(b.Name, b.BucketTargetsConfigJSON, crypto.Context{b.Name: b.Name, bucketTargetsFile: bucketTargetsFile})
+    if err != nil {
+        return err
+    }
+
+    b.BucketTargetsConfigJSON = encBytes
+    b.BucketTargetsConfigMetaJSON = metaBytes
+    return b.Save(ctx, objectAPI)
+}
+
+// encrypt bucket metadata if kms is configured.
+func encryptBucketMetadata(bucket string, input []byte, kmsContext crypto.Context) (output, metabytes []byte, err error) {
+    var sealedKey crypto.SealedKey
+    if GlobalKMS == nil {
+        output = input
+        return
+    }
+    var (
+        key    [32]byte
+        encKey []byte
+    )
+    metadata := make(map[string]string)
+    key, encKey, err = GlobalKMS.GenerateKey(GlobalKMS.DefaultKeyID(), kmsContext)
+    if err != nil {
+        return
+    }
+    outbuf := bytes.NewBuffer(nil)
+    objectKey := crypto.GenerateKey(key, rand.Reader)
+    sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
+    crypto.S3.CreateMetadata(metadata, GlobalKMS.DefaultKeyID(), encKey, sealedKey)
+    _, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
+    if err != nil {
+        return output, metabytes, err
+    }
+    metabytes, err = json.Marshal(metadata)
+    if err != nil {
+        return
+    }
+    return outbuf.Bytes(), metabytes, nil
+}
+
+// decrypt bucket metadata if kms is configured.
+func decryptBucketMetadata(input []byte, bucket string, meta map[string]string, kmsContext crypto.Context) ([]byte, error) {
+    if GlobalKMS == nil {
+        return nil, errKMSNotConfigured
+    }
+    keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(meta)
+
+    if err != nil {
+        return nil, err
+    }
+    extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, kmsContext)
+    if err != nil {
+        return nil, err
+    }
+    var objectKey crypto.ObjectKey
+    if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, ""); err != nil {
+        return nil, err
+    }
+    outbuf := bytes.NewBuffer(nil)
+    _, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
+
+    return outbuf.Bytes(), err
+}
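encryptBucketMetadata and decryptBucketMetadata above wrap the payload in sio's DARE format keyed by a KMS-sealed object key. Ignoring the KMS sealing, the underlying sio roundtrip looks like this minimal sketch (static random key, for illustration only):

package main

import (
    "bytes"
    "crypto/rand"
    "fmt"

    "github.com/minio/sio"
)

func main() {
    var key [32]byte // example-only key; real code derives it via the KMS
    if _, err := rand.Read(key[:]); err != nil {
        panic(err)
    }

    plaintext := []byte(`{"targets":[{"endpoint":"replica.example.com"}]}`)

    // Encrypt into the DARE 2.0 format, as encryptBucketMetadata does.
    var sealed bytes.Buffer
    if _, err := sio.Encrypt(&sealed, bytes.NewReader(plaintext), sio.Config{
        Key:        key[:],
        MinVersion: sio.Version20,
    }); err != nil {
        panic(err)
    }

    // Decrypt it back, as decryptBucketMetadata does.
    var opened bytes.Buffer
    if _, err := sio.Decrypt(&opened, &sealed, sio.Config{
        Key:        key[:],
        MinVersion: sio.Version20,
    }); err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(plaintext, opened.Bytes())) // true
}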
@@ -90,6 +90,24 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
                 err = msgp.WrapError(err, "QuotaConfigJSON")
                 return
             }
+        case "ReplicationConfigXML":
+            z.ReplicationConfigXML, err = dc.ReadBytes(z.ReplicationConfigXML)
+            if err != nil {
+                err = msgp.WrapError(err, "ReplicationConfigXML")
+                return
+            }
+        case "BucketTargetsConfigJSON":
+            z.BucketTargetsConfigJSON, err = dc.ReadBytes(z.BucketTargetsConfigJSON)
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigJSON")
+                return
+            }
+        case "BucketTargetsConfigMetaJSON":
+            z.BucketTargetsConfigMetaJSON, err = dc.ReadBytes(z.BucketTargetsConfigMetaJSON)
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
+                return
+            }
         default:
             err = dc.Skip()
             if err != nil {
@@ -103,9 +121,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
-    // map header, size 11
+    // map header, size 14
     // write "Name"
-    err = en.Append(0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
+    err = en.Append(0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
     if err != nil {
         return
     }
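The byte-level change above is easy to decode by hand: in MessagePack a map with up to 15 entries is a "fixmap", encoded as 0x80|N, so growing BucketMetadata from 11 to 14 serialized fields turns the header byte 0x8b into 0x8e. The 0xa4 that follows is a fixstr header (0xa0|4) for the four-byte key "Name". A quick check of that arithmetic:

package main

import "fmt"

func main() {
    const fixmap, fixstr = 0x80, 0xa0

    fmt.Printf("0x%x\n", fixmap|11)          // 0x8b: old 11-field map header
    fmt.Printf("0x%x\n", fixmap|14)          // 0x8e: new 14-field map header
    fmt.Printf("0x%x\n", fixstr|len("Name")) // 0xa4: fixstr of length 4
}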
@@ -214,15 +232,45 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
         err = msgp.WrapError(err, "QuotaConfigJSON")
         return
     }
+    // write "ReplicationConfigXML"
+    err = en.Append(0xb4, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
+    if err != nil {
+        return
+    }
+    err = en.WriteBytes(z.ReplicationConfigXML)
+    if err != nil {
+        err = msgp.WrapError(err, "ReplicationConfigXML")
+        return
+    }
+    // write "BucketTargetsConfigJSON"
+    err = en.Append(0xb7, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
+    if err != nil {
+        return
+    }
+    err = en.WriteBytes(z.BucketTargetsConfigJSON)
+    if err != nil {
+        err = msgp.WrapError(err, "BucketTargetsConfigJSON")
+        return
+    }
+    // write "BucketTargetsConfigMetaJSON"
+    err = en.Append(0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
+    if err != nil {
+        return
+    }
+    err = en.WriteBytes(z.BucketTargetsConfigMetaJSON)
+    if err != nil {
+        err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
+        return
+    }
     return
 }
 // MarshalMsg implements msgp.Marshaler
 func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
     o = msgp.Require(b, z.Msgsize())
-    // map header, size 11
+    // map header, size 14
     // string "Name"
-    o = append(o, 0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
+    o = append(o, 0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
     o = msgp.AppendString(o, z.Name)
     // string "Created"
     o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
@@ -254,6 +302,15 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
     // string "QuotaConfigJSON"
     o = append(o, 0xaf, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
     o = msgp.AppendBytes(o, z.QuotaConfigJSON)
+    // string "ReplicationConfigXML"
+    o = append(o, 0xb4, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
+    o = msgp.AppendBytes(o, z.ReplicationConfigXML)
+    // string "BucketTargetsConfigJSON"
+    o = append(o, 0xb7, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
+    o = msgp.AppendBytes(o, z.BucketTargetsConfigJSON)
+    // string "BucketTargetsConfigMetaJSON"
+    o = append(o, 0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
+    o = msgp.AppendBytes(o, z.BucketTargetsConfigMetaJSON)
     return
 }
@@ -341,6 +398,24 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
         err = msgp.WrapError(err, "QuotaConfigJSON")
         return
     }
+        case "ReplicationConfigXML":
+            z.ReplicationConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.ReplicationConfigXML)
+            if err != nil {
+                err = msgp.WrapError(err, "ReplicationConfigXML")
+                return
+            }
+        case "BucketTargetsConfigJSON":
+            z.BucketTargetsConfigJSON, bts, err = msgp.ReadBytesBytes(bts, z.BucketTargetsConfigJSON)
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigJSON")
+                return
+            }
+        case "BucketTargetsConfigMetaJSON":
+            z.BucketTargetsConfigMetaJSON, bts, err = msgp.ReadBytesBytes(bts, z.BucketTargetsConfigMetaJSON)
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
+                return
+            }
         default:
             bts, err = msgp.Skip(bts)
             if err != nil {
@@ -355,6 +430,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *BucketMetadata) Msgsize() (s int) {
-    s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON)
+    s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON)
     return
 }
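The magic numbers in Msgsize are per-field label costs: each map key is a fixstr, so a field named s contributes 1 + len(s) bytes of key overhead before its value bound. The three new terms check out: 21 = 1 + len("ReplicationConfigXML"), 24 = 1 + len("BucketTargetsConfigJSON"), 28 = 1 + len("BucketTargetsConfigMetaJSON"). A quick verification:

package main

import "fmt"

func main() {
    for _, name := range []string{
        "ReplicationConfigXML",        // 20 chars -> 21
        "BucketTargetsConfigJSON",     // 23 chars -> 24
        "BucketTargetsConfigMetaJSON", // 27 chars -> 28
    } {
        // 1 byte of fixstr header plus the key bytes themselves.
        fmt.Printf("%s: %d\n", name, 1+len(name))
    }
}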
@@ -17,15 +17,12 @@
 package cmd

 import (
-    "encoding/json"
     "encoding/xml"
     "io"
     "net/http"
-    "reflect"
-    "time"

     "github.com/gorilla/mux"
     xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/bucket/policy"
     "github.com/minio/minio/pkg/event"
@@ -42,7 +39,7 @@ const (
 func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "GetBucketNotification")

-    defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
+    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

     vars := mux.Vars(r)
     bucketName := vars["bucket"]
@@ -114,7 +111,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
 func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "PutBucketNotification")

-    defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
+    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

     objectAPI := api.ObjectAPI()
     if objectAPI == nil {
@@ -173,137 +170,3 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,

     writeSuccessResponseHeadersOnly(w)
 }
-
-func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "ListenBucketNotification")
-
-    defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))
-
-    // Validate if bucket exists.
-    objAPI := api.ObjectAPI()
-    if objAPI == nil {
-        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
-        return
-    }
-
-    if !objAPI.IsNotificationSupported() {
-        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
-        return
-    }
-
-    if !objAPI.IsListenBucketSupported() {
-        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
-        return
-    }
-
-    vars := mux.Vars(r)
-    bucketName := vars["bucket"]
-
-    values := r.URL.Query()
-    values.Set(peerRESTListenBucket, bucketName)
-
-    var prefix string
-    if len(values[peerRESTListenPrefix]) > 1 {
-        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
-        return
-    }
-
-    if len(values[peerRESTListenPrefix]) == 1 {
-        if err := event.ValidateFilterRuleValue(values[peerRESTListenPrefix][0]); err != nil {
-            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-            return
-        }
-
-        prefix = values[peerRESTListenPrefix][0]
-    }
-
-    var suffix string
-    if len(values[peerRESTListenSuffix]) > 1 {
-        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
-        return
-    }
-
-    if len(values[peerRESTListenSuffix]) == 1 {
-        if err := event.ValidateFilterRuleValue(values[peerRESTListenSuffix][0]); err != nil {
-            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-            return
-        }
-
-        suffix = values[peerRESTListenSuffix][0]
-    }
-
-    pattern := event.NewPattern(prefix, suffix)
-
-    var eventNames []event.Name
-    for _, s := range values[peerRESTListenEvents] {
-        eventName, err := event.ParseName(s)
-        if err != nil {
-            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-            return
-        }
-
-        eventNames = append(eventNames, eventName)
-    }
-
-    if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
-        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-        return
-    }
-
-    rulesMap := event.NewRulesMap(eventNames, pattern, event.TargetID{ID: mustGetUUID()})
-
-    w.Header().Set(xhttp.ContentType, "text/event-stream")
-
-    // Listen Publisher and peer-listen-client uses nonblocking send and hence does not wait for slow receivers.
-    // Use buffered channel to take care of burst sends or slow w.Write()
-    listenCh := make(chan interface{}, 4000)
-
-    peers := newPeerRestClients(globalEndpoints)
-
-    globalHTTPListen.Subscribe(listenCh, ctx.Done(), func(evI interface{}) bool {
-        ev, ok := evI.(event.Event)
-        if !ok {
-            return false
-        }
-        if ev.S3.Bucket.Name != values.Get(peerRESTListenBucket) {
-            return false
-        }
-        return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
-    })
-
-    for _, peer := range peers {
-        if peer == nil {
-            continue
-        }
-        peer.Listen(listenCh, ctx.Done(), values)
-    }
-
-    keepAliveTicker := time.NewTicker(500 * time.Millisecond)
-    defer keepAliveTicker.Stop()
-
-    enc := json.NewEncoder(w)
-    for {
-        select {
-        case evI := <-listenCh:
-            ev := evI.(event.Event)
-            if len(string(ev.EventName)) > 0 {
-                if err := enc.Encode(struct{ Records []event.Event }{[]event.Event{ev}}); err != nil {
-                    return
-                }
-            } else {
-                if _, err := w.Write([]byte(" ")); err != nil {
-                    return
-                }
-            }
-            w.(http.Flusher).Flush()
-        case <-keepAliveTicker.C:
-            if _, err := w.Write([]byte(" ")); err != nil {
-                return
-            }
-            w.(http.Flusher).Flush()
-        case <-ctx.Done():
-            return
-        }
-    }
-
-}
@@ -21,10 +21,12 @@ import (
     "math"
     "net/http"

+    xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/auth"
     objectlock "github.com/minio/minio/pkg/bucket/object/lock"
     "github.com/minio/minio/pkg/bucket/policy"
+    "github.com/minio/minio/pkg/bucket/replication"
 )

 // BucketObjectLockSys - map of bucket and retention configuration.
@@ -33,7 +35,7 @@ type BucketObjectLockSys struct{}
 // Get - Get retention configuration.
 func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
     if globalIsGateway {
-        objAPI := newObjectLayerWithoutSafeModeFn()
+        objAPI := newObjectLayerFn()
         if objAPI == nil {
             return r, errServerNotInitialized
         }
@@ -80,24 +82,25 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
 // For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
 // governance bypass headers are set and user has governance bypass permissions.
 // Objects in "Compliance" mode can be overwritten only if retention date is past.
-func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
+func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, oi ObjectInfo, gerr error) APIErrorCode {
     opts, err := getOpts(ctx, r, bucket, object.ObjectName)
     if err != nil {
         return toAPIErrorCode(ctx, err)
     }

     opts.VersionID = object.VersionID

-    oi, err := getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
-    if err != nil {
-        switch err.(type) {
+    if gerr != nil { // error from GetObjectInfo
+        switch gerr.(type) {
         case MethodNotAllowed: // This happens usually for a delete marker
             if oi.DeleteMarker {
                 // Delete marker should be present and valid.
                 return ErrNone
             }
         }
-        return toAPIErrorCode(ctx, err)
+        if isErrObjectNotFound(gerr) || isErrVersionNotFound(gerr) {
+            return ErrNone
+        }
+        return toAPIErrorCode(ctx, gerr)
     }

     lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
@@ -245,13 +248,13 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,
 // For objects in "Compliance" mode, retention date cannot be shortened, and mode cannot be altered.
 // For objects with legal hold header set, the s3:PutObjectLegalHold permission is expected to be set
 // Both legal hold and retention can be applied independently on an object
-func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
+func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
     var mode objectlock.RetMode
     var retainDate objectlock.RetentionDate
     var legalHold objectlock.ObjectLegalHold

-    retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
-    legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)
+    retentionRequested := objectlock.IsObjectLockRetentionRequested(rq.Header)
+    legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(rq.Header)

     retentionCfg, err := globalBucketObjectLockSys.Get(bucket)
     if err != nil {
@@ -267,25 +270,24 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
         return mode, retainDate, legalHold, ErrNone
     }

-    opts, err := getOpts(ctx, r, bucket, object)
+    opts, err := getOpts(ctx, rq, bucket, object)
     if err != nil {
         return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
     }

-    if opts.VersionID != "" {
+    replica := rq.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String()
+
+    if opts.VersionID != "" && !replica {
         if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
             r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)

             t, err := objectlock.UTCNowNTP()
             if err != nil {
                 logger.LogIf(ctx, err)
                 return mode, retainDate, legalHold, ErrObjectLocked
             }

             if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
                 return mode, retainDate, legalHold, ErrObjectLocked
             }

             mode = r.Mode
             retainDate = r.RetainUntilDate
             legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
@@ -298,17 +300,17 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj

     if legalHoldRequested {
         var lerr error
-        if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(r.Header); lerr != nil {
+        if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
             return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
         }
     }

     if retentionRequested {
-        legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(r.Header)
+        legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(rq.Header)
         if err != nil {
             return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
         }
-        rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(r.Header)
+        rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header)
         if err != nil {
             return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
         }
@@ -317,7 +319,9 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
         }
         return rMode, rDate, legalHold, ErrNone
     }
-
+    if replica { // replica inherits retention metadata only from source
+        return "", objectlock.RetentionDate{}, legalHold, ErrNone
+    }
     if !retentionRequested && retentionCfg.Validity > 0 {
         if retentionPermErr != ErrNone {
             return mode, retainDate, legalHold, retentionPermErr
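The replica checks above hinge on one header: a request whose x-amz-bucket-replication-status equals REPLICA carries retention metadata already copied from the source object, so the target side must neither re-derive bucket defaults nor block the versioned overwrite. A minimal sketch of that header gate (constant values as in the S3 API, helper name invented):

package main

import (
    "fmt"
    "net/http"
)

// Header and value as used by S3 bucket replication.
const (
    amzReplicationStatus = "X-Amz-Bucket-Replication-Status"
    replicaStatus        = "REPLICA"
)

// isReplicaRequest reports whether a PUT originates from replication,
// in which case lock/retention metadata is inherited from the source.
func isReplicaRequest(h http.Header) bool {
    return h.Get(amzReplicationStatus) == replicaStatus
}

func main() {
    h := http.Header{}
    fmt.Println(isReplicaRequest(h)) // false: apply bucket default retention
    h.Set(amzReplicationStatus, replicaStatus)
    fmt.Println(isReplicaRequest(h)) // true: inherit retention from source
}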
@@ -40,7 +40,7 @@ const (
 func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "PutBucketPolicy")

-    defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
+    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

     objAPI := api.ObjectAPI()
     if objAPI == nil {
@@ -106,7 +106,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "DeleteBucketPolicy")

-    defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
+    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

     objAPI := api.ObjectAPI()
     if objAPI == nil {
@@ -141,7 +141,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
 func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "GetBucketPolicy")

-    defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
+    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

     objAPI := api.ObjectAPI()
     if objAPI == nil {
@@ -252,7 +252,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV4 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
+    reqV4, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", testCase.bucketName),
         int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -266,7 +266,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV2 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
+    reqV2, err := newTestSignedRequestV2(http.MethodPut, getPutPolicyURL("", testCase.bucketName),
         int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -283,7 +283,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
     bucketPolicyStr := fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)
     // create unsigned HTTP request for PutBucketPolicyHandler.
-    anonReq, err := newTestRequest("PUT", getPutPolicyURL("", bucketName),
+    anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
         int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))

     if err != nil {
@@ -302,7 +302,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
     nilBucket := "dummy-bucket"

-    nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
+    nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
         0, nil, "", "", nil)

     if err != nil {
@@ -344,7 +344,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV4 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
+    reqV4, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
         int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -358,7 +358,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV2 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
+    reqV2, err := newTestSignedRequestV2(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
         int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -415,7 +415,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV4 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
+    reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
         0, nil, testCase.accessKey, testCase.secretKey, nil)

     if err != nil {
@@ -454,7 +454,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV2 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
+    reqV2, err := newTestSignedRequestV2(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
         0, nil, testCase.accessKey, testCase.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -491,7 +491,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // Test for Anonymous/unsigned http request.
     // Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
     // create unsigned HTTP request for PutBucketPolicyHandler.
-    anonReq, err := newTestRequest("GET", getPutPolicyURL("", bucketName), 0, nil)
+    anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)

     if err != nil {
         t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
@@ -509,7 +509,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     // The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
     nilBucket := "dummy-bucket"

-    nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
+    nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
         0, nil, "", "", nil)

     if err != nil {
@@ -589,7 +589,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV4 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
+    reqV4, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
         int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -639,7 +639,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV4 := httptest.NewRecorder()
     // construct HTTP request for Delete bucket policy endpoint.
-    reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
+    reqV4, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", testCase.bucketName),
         0, nil, testCase.accessKey, testCase.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -661,7 +661,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV2 := httptest.NewRecorder()
     // construct HTTP request for PUT bucket policy endpoint.
-    reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
+    reqV2, err := newTestSignedRequestV2(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
         int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -678,7 +678,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
     // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
     recV2 := httptest.NewRecorder()
     // construct HTTP request for Delete bucket policy endpoint.
-    reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
+    reqV2, err := newTestSignedRequestV2(http.MethodDelete, getDeletePolicyURL("", testCase.bucketName),
         0, nil, testCase.accessKey, testCase.secretKey, nil)
     if err != nil {
         t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -694,7 +694,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
     // Test for Anonymous/unsigned http request.
     // Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
     // create unsigned HTTP request for PutBucketPolicyHandler.
-    anonReq, err := newTestRequest("DELETE", getPutPolicyURL("", bucketName), 0, nil)
+    anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)

     if err != nil {
         t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
@@ -712,7 +712,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
     // The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
     nilBucket := "dummy-bucket"

-    nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
+    nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
         0, nil, "", "", nil)

     if err != nil {
@@ -25,7 +25,7 @@ import (
     "time"

     jsoniter "github.com/json-iterator/go"
-    miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
+    miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
     xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/bucket/policy"
@@ -68,6 +72,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
     principalType := "Anonymous"
     if username != "" {
         principalType = "User"
+        if len(claims) > 0 {
+            principalType = "AssumedRole"
+        }
+        if username == globalActiveCred.AccessKey {
+            principalType = "Account"
+        }
     }

     vid := r.URL.Query().Get("versionId")
@@ -75,9 +81,6 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
         if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil {
             vid = u.Query().Get("versionId")
         }
-        if vid == "" {
-            vid = r.Header.Get(xhttp.AmzCopySourceVersionID)
-        }
     }

     args := map[string][]string{
@@ -146,7 +149,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
     for k, v := range claims {
         vStr, ok := v.(string)
         if ok {
-            args[k] = []string{vStr}
+            // Special case for AD/LDAP STS users
+            if k == ldapUser {
+                args["user"] = []string{vStr}
+            } else {
+                args[k] = []string{vStr}
+            }
         }
     }
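The ldapUser special case above maps an LDAP STS claim onto the generic "user" policy condition key, so LDAP-specific policies can be written without knowing the raw claim name. A small sketch of that claim-flattening step (claim values invented for illustration):

package main

import "fmt"

const ldapUser = "ldapUser" // stand-in for the actual claim key

// conditionArgs flattens string JWT claims into policy condition values,
// aliasing the LDAP user claim to the generic "user" key as above.
func conditionArgs(claims map[string]interface{}) map[string][]string {
    args := make(map[string][]string)
    for k, v := range claims {
        vStr, ok := v.(string)
        if !ok {
            continue // only string claims become condition values
        }
        if k == ldapUser {
            args["user"] = []string{vStr}
        } else {
            args[k] = []string{vStr}
        }
    }
    return args
}

func main() {
    claims := map[string]interface{}{ldapUser: "uid=alice,dc=example", "iss": "sts"}
    fmt.Println(conditionArgs(claims)) // map[iss:[sts] user:[uid=alice,dc=example]]
}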
@@ -22,9 +22,7 @@ import (
     "fmt"
     "time"

-    "github.com/minio/minio/cmd/config"
     "github.com/minio/minio/cmd/logger"
-    "github.com/minio/minio/pkg/env"
     "github.com/minio/minio/pkg/event"
     "github.com/minio/minio/pkg/madmin"
 )
@@ -65,48 +63,43 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
 }

 func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
-    objAPI := newObjectLayerWithoutSafeModeFn()
+    objAPI := newObjectLayerFn()
     if objAPI == nil {
         return errServerNotInitialized
     }

-    q, err := sys.Get(bucket)
-    if err != nil {
-        return nil
-    }
-
-    if q.Type == madmin.FIFOQuota {
-        return nil
-    }
-
-    if q.Quota == 0 {
-        // No quota set return quickly.
-        return nil
-    }
-
     sys.bucketStorageCache.Once.Do(func() {
-        sys.bucketStorageCache.TTL = 10 * time.Second
+        sys.bucketStorageCache.TTL = 1 * time.Second
         sys.bucketStorageCache.Update = func() (interface{}, error) {
+            ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
+            defer done()
             return loadDataUsageFromBackend(ctx, objAPI)
         }
     })

-    v, err := sys.bucketStorageCache.Get()
+    q, err := sys.Get(bucket)
     if err != nil {
         return err
     }

-    dui := v.(DataUsageInfo)
-
-    bui, ok := dui.BucketsUsage[bucket]
-    if !ok {
-        // bucket not found, cannot enforce quota
-        // call will fail anyways later.
-        return nil
-    }
-
-    if (bui.Size + uint64(size)) > q.Quota {
-        return BucketQuotaExceeded{Bucket: bucket}
+    if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
+        v, err := sys.bucketStorageCache.Get()
+        if err != nil {
+            return err
+        }
+
+        dui := v.(DataUsageInfo)
+
+        bui, ok := dui.BucketsUsage[bucket]
+        if !ok {
+            // bucket not found, cannot enforce quota
+            // call will fail anyways later.
+            return nil
+        }
+
+        if (bui.Size + uint64(size)) >= q.Quota {
+            return BucketQuotaExceeded{Bucket: bucket}
+        }
     }

     return nil
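The reshaped check above makes two separate decisions: a 1-second TTL cache amortizes the expensive backend data-usage load across hot PUT paths, and the quota comparison itself only fires when a hard quota is configured. A sketch of the same guard with the cache and usage lookup faked out (all names invented):

package main

import (
    "fmt"
    "sync"
    "time"
)

// usageCache is a tiny TTL cache in the spirit of bucketStorageCache.
type usageCache struct {
    mu      sync.Mutex
    ttl     time.Duration
    fetched time.Time
    usage   map[string]uint64 // bucket -> bytes used
    load    func() map[string]uint64
}

func (c *usageCache) get() map[string]uint64 {
    c.mu.Lock()
    defer c.mu.Unlock()
    if time.Since(c.fetched) > c.ttl {
        c.usage = c.load() // refresh at most once per TTL window
        c.fetched = time.Now()
    }
    return c.usage
}

// checkHardQuota rejects a write of size bytes that would reach the quota.
func checkHardQuota(c *usageCache, bucket string, size, quota uint64) error {
    if quota == 0 {
        return nil // no quota configured
    }
    used, ok := c.get()[bucket]
    if !ok {
        return nil // no usage info: cannot enforce, let the call proceed
    }
    if used+size >= quota {
        return fmt.Errorf("bucket %s: quota exceeded", bucket)
    }
    return nil
}

func main() {
    cache := &usageCache{
        ttl:  time.Second,
        load: func() map[string]uint64 { return map[string]uint64{"media": 900} },
    }
    fmt.Println(checkHardQuota(cache, "media", 50, 1000))  // <nil>
    fmt.Println(checkHardQuota(cache, "media", 200, 1000)) // quota exceeded
}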
@@ -120,149 +113,112 @@ func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
|
||||
return globalBucketQuotaSys.check(ctx, bucket, size)
|
||||
}
|
||||
|
||||
const (
|
||||
bgQuotaInterval = 1 * time.Hour
|
||||
)
|
||||
|
||||
// initQuotaEnforcement starts the routine that deletes objects in bucket
|
||||
// that exceeds the FIFO quota
|
||||
func initQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
|
||||
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
|
||||
go startBucketQuotaEnforcement(ctx, objAPI)
|
||||
}
|
||||
}
|
||||
|
||||
func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-time.NewTimer(bgQuotaInterval).C:
|
||||
enforceFIFOQuota(ctx, objAPI)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
|
||||
// have been deleted so as to bring bucket usage within quota
|
||||
func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) {
|
||||
// Turn off quota enforcement if data usage info is unavailable.
|
||||
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
|
||||
// have been deleted so as to bring bucket usage within quota.
|
||||
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
|
||||
// Check if the current bucket has quota restrictions, if not skip it
|
||||
cfg, err := globalBucketQuotaSys.Get(bucket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buckets, err := objectAPI.ListBuckets(ctx)
|
||||
if cfg.Type != madmin.FIFOQuota {
|
||||
return
|
||||
}
|
||||
|
||||
var toFree uint64
|
||||
if bui.Size > cfg.Quota && cfg.Quota > 0 {
|
||||
toFree = bui.Size - cfg.Quota
|
||||
}
|
||||
|
||||
if toFree <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Allocate new results channel to receive ObjectInfo.
|
||||
objInfoCh := make(chan ObjectInfo)
|
||||
|
||||
versioned := globalBucketVersioningSys.Enabled(bucket)
|
||||
|
||||
// Walk through all objects
|
||||
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
// reuse the fileScorer used by disk cache to score entries by
|
||||
// ModTime to find the oldest objects in bucket to delete. In
|
||||
// the context of bucket quota enforcement - number of hits are
|
||||
// irrelevant.
|
||||
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, binfo := range buckets {
|
||||
bucket := binfo.Name
|
||||
|
||||
bui, ok := dataUsageInfo.BucketsUsage[bucket]
|
||||
if !ok {
|
||||
// bucket doesn't exist anymore, or we
|
||||
// do not have any information to proceed.
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the current bucket has quota restrictions, if not skip it
|
||||
cfg, err := globalBucketQuotaSys.Get(bucket)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if cfg.Type != madmin.FIFOQuota {
|
||||
continue
|
||||
}
|
||||
|
||||
var toFree uint64
|
||||
if bui.Size > cfg.Quota && cfg.Quota > 0 {
|
||||
toFree = bui.Size - cfg.Quota
|
||||
}
|
||||
|
||||
if toFree == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Allocate new results channel to receive ObjectInfo.
|
||||
objInfoCh := make(chan ObjectInfo)
|
||||
|
||||
versioned := globalBucketVersioningSys.Enabled(bucket)
|
||||
|
||||
// Walk through all objects
|
||||
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// reuse the fileScorer used by disk cache to score entries by
|
||||
// ModTime to find the oldest objects in bucket to delete. In
|
||||
// the context of bucket quota enforcement - number of hits are
|
||||
// irrelevant.
|
||||
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
|
||||
for obj := range objInfoCh {
|
||||
if obj.DeleteMarker {
|
||||
// Delete markers are automatically added for FIFO purge.
|
||||
scorer.addFileWithObjInfo(obj, 1)
|
||||
continue
|
||||
}
|
||||
// skip objects currently under retention
|
||||
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
|
||||
continue
|
||||
}
|
||||
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
|
||||
for obj := range objInfoCh {
|
||||
if obj.DeleteMarker {
|
||||
// Delete markers are automatically added for FIFO purge.
|
||||
scorer.addFileWithObjInfo(obj, 1)
|
||||
continue
|
||||
}
|
||||
// skip objects currently under retention
|
||||
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
|
||||
continue
|
||||
}
|
||||
scorer.addFileWithObjInfo(obj, 1)
|
||||
}
|
||||
|
||||
// If we saw less than quota we are good.
|
||||
if scorer.seenBytes <= cfg.Quota {
|
||||
return
|
||||
}
|
||||
// Calculate how much we want to delete now.
|
||||
toFreeNow := scorer.seenBytes - cfg.Quota
|
||||
// We were less over quota than we thought. Adjust so we delete less.
|
||||
// If we are more over, leave it for the next run to pick up.
|
||||
if toFreeNow < toFree {
|
||||
if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
|
||||
// We got below or at quota.
|
||||
return
|
||||
}
|
||||
}

		var objects []ObjectToDelete
		numKeys := len(scorer.fileObjInfos())
		for i, obj := range scorer.fileObjInfos() {
			objects = append(objects, ObjectToDelete{
				ObjectName: obj.Name,
				VersionID:  obj.VersionID,
			})
			if len(objects) < maxDeleteList && (i < numKeys-1) {
				// skip deletion until maxDeleteList or end of slice
				continue
			}

			if len(objects) == 0 {
				break
			}

			// Deletes a list of objects.
			_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
				Versioned: versioned,
			})
			for i := range deleteErrs {
				if deleteErrs[i] != nil {
					logger.LogIf(ctx, deleteErrs[i])
					continue
				}

				// Notify object deleted event.
				sendEvent(eventArgs{
					EventName:  event.ObjectRemovedDelete,
					BucketName: bucket,
					Object:     obj,
					Host:       "Internal: [FIFO-QUOTA-EXPIRY]",
				})
			}
			objects = nil
		}
	}
}
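
For context, a minimal sketch of how a background task might drive the enforcement loop above. The function name enforceFIFOQuota, its signature, and the sweep interval are assumptions for illustration only, not part of this changeset:

func exampleQuotaEnforcementLoop(ctx context.Context, objAPI ObjectLayer) {
	t := time.NewTicker(30 * time.Minute) // assumed sweep interval
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			buckets, err := objAPI.ListBuckets(ctx) // assumed ObjectLayer method
			if err != nil {
				logger.LogIf(ctx, err)
				continue
			}
			enforceFIFOQuota(ctx, objAPI, buckets) // assumed name for the loop above
		}
	}
}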

991	cmd/bucket-replication.go	(Normal file)
@@ -0,0 +1,991 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"fmt"
	"net/http"
	"reflect"
	"strings"
	"time"

	minio "github.com/minio/minio-go/v7"
	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bucket/bandwidth"
	"github.com/minio/minio/pkg/bucket/replication"
	"github.com/minio/minio/pkg/event"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
)

// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
	if globalIsGateway {
		objAPI := newObjectLayerFn()
		if objAPI == nil {
			return nil, errServerNotInitialized
		}

		return nil, BucketReplicationConfigNotFound{Bucket: bucketName}
	}

	return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
}

// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
	arn, err := madmin.ParseARN(rCfg.RoleArn)
	if err != nil {
		return false, BucketRemoteArnInvalid{}
	}
	if arn.Type != madmin.ReplicationService {
		return false, BucketRemoteArnTypeInvalid{}
	}
	clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rCfg.RoleArn)
	if clnt == nil {
		return false, BucketRemoteTargetNotFound{Bucket: bucket}
	}
	if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
		return false, BucketRemoteDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
	}
	if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
		if ret.LockEnabled {
			lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, rCfg.GetDestination().Bucket)
			if err != nil || lock != "Enabled" {
				return false, BucketReplicationDestinationMissingLock{Bucket: rCfg.GetDestination().Bucket}
			}
		}
	}
	// validate replication ARN against target endpoint
	c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.RoleArn]
	if ok {
		if c.EndpointURL().String() == clnt.EndpointURL().String() {
			sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
			return sameTarget, nil
		}
	}
	return false, BucketRemoteTargetNotFound{Bucket: bucket}
}

func mustReplicateWeb(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string, permErr APIErrorCode) (replicate bool, sync bool) {
	if permErr != ErrNone {
		return
	}
	return mustReplicater(ctx, bucket, object, meta, replStatus)
}

// mustReplicate returns 2 booleans - true if object meets replication criteria and true if replication is to be done in
// a synchronous manner.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) (replicate bool, sync bool) {
	if s3Err := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, "", r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
		return
	}
	return mustReplicater(ctx, bucket, object, meta, replStatus)
}

// mustReplicater returns 2 booleans - true if object meets replication criteria and true if replication is to be done in
// a synchronous manner.
func mustReplicater(ctx context.Context, bucket, object string, meta map[string]string, replStatus string) (replicate bool, sync bool) {
	if globalIsGateway {
		return replicate, sync
	}
	if rs, ok := meta[xhttp.AmzBucketReplicationStatus]; ok {
		replStatus = rs
	}
	if replication.StatusType(replStatus) == replication.Replica {
		return replicate, sync
	}
	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil {
		return replicate, sync
	}
	opts := replication.ObjectOpts{
		Name: object,
		SSEC: crypto.SSEC.IsEncrypted(meta),
	}
	tagStr, ok := meta[xhttp.AmzObjectTagging]
	if ok {
		opts.UserTags = tagStr
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
	// the target online status should not be used here while deciding
	// whether to replicate as the target could be temporarily down
	if tgt != nil {
		return cfg.Replicate(opts), tgt.replicateSync
	}
	return cfg.Replicate(opts), false
}
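
As a usage sketch, a PUT path might consult mustReplicate after persisting the object and hand qualifying versions to the scheduler defined later in this file; the wiring here is illustrative, not taken from this changeset:

func examplePostPutReplication(ctx context.Context, r *http.Request, objInfo ObjectInfo, objAPI ObjectLayer) {
	replicate, sync := mustReplicate(ctx, r, objInfo.Bucket, objInfo.Name, objInfo.UserDefined,
		string(objInfo.ReplicationStatus)) // assumed: current status carried on ObjectInfo
	if replicate {
		scheduleReplication(ctx, objInfo, objAPI, sync)
	}
}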

// Standard headers that need to be extracted from User metadata.
var standardHeaders = []string{
	xhttp.ContentType,
	xhttp.CacheControl,
	xhttp.ContentEncoding,
	xhttp.ContentLanguage,
	xhttp.ContentDisposition,
	xhttp.AmzStorageClass,
	xhttp.AmzObjectTagging,
	xhttp.AmzBucketReplicationStatus,
	xhttp.AmzObjectLockMode,
	xhttp.AmzObjectLockRetainUntilDate,
	xhttp.AmzObjectLockLegalHold,
	xhttp.AmzTagCount,
	xhttp.AmzServerSideEncryption,
}

// returns true if any of the objects being deleted qualifies for replication.
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
	c, err := getReplicationConfig(ctx, bucket)
	if err != nil || c == nil {
		return false
	}
	for _, obj := range objects {
		if c.HasActiveRules(obj.ObjectName, true) {
			return true
		}
	}
	return false
}

// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(matchHeaderKey string) bool {
	return equals(matchHeaderKey, standardHeaders...)
}

// returns whether object version is a deletemarker and if object qualifies for replication
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
	rcfg, err := getReplicationConfig(ctx, bucket)
	if err != nil || rcfg == nil {
		return false, false, sync
	}
	opts := replication.ObjectOpts{
		Name:         dobj.ObjectName,
		SSEC:         crypto.SSEC.IsEncrypted(oi.UserDefined),
		UserTags:     oi.UserTags,
		DeleteMarker: oi.DeleteMarker,
		VersionID:    dobj.VersionID,
	}
	replicate = rcfg.Replicate(opts)
	// when incoming delete is removal of a delete marker (a.k.a versioned delete),
	// GetObjectInfo returns extra information even though it returns errFileNotFound
	if gerr != nil {
		validReplStatus := false
		switch oi.ReplicationStatus {
		case replication.Pending, replication.Completed, replication.Failed:
			validReplStatus = true
		}
		if oi.DeleteMarker && (validReplStatus || replicate) {
			return oi.DeleteMarker, true, sync
		}
		// can be the case that other cluster is down and duplicate `mc rm --vid`
		// is issued - this still needs to be replicated back to the other target
		return oi.DeleteMarker, oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
	// the target online status should not be used here while deciding
	// whether to replicate deletes as the target could be temporarily down
	if tgt == nil {
		return oi.DeleteMarker, false, false
	}
	return oi.DeleteMarker, replicate, tgt.replicateSync
}
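
A sketch of how a delete path might consume this check; the DeletedObject field names below are assumed from their use elsewhere in this changeset and are illustrative only:

func exampleDeleteWithReplication(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error, objAPI ObjectLayer) {
	_, replicate, sync := checkReplicateDelete(ctx, bucket, dobj, oi, gerr)
	if replicate {
		scheduleReplicationDelete(ctx, DeletedObjectVersionInfo{
			DeletedObject: DeletedObject{ // assumed fields
				ObjectName: dobj.ObjectName,
				VersionID:  dobj.VersionID,
			},
			Bucket: bucket,
		}, objAPI, sync)
	}
}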

// replicate deletes to the designated replication target if replication configuration
// has delete marker replication or delete replication (MinIO extension to allow deletes where version id
// is specified) enabled.
// Similar to bucket replication for PUT operation, soft delete (a.k.a setting delete marker) and
// permanent deletes (by specifying a version ID in the delete operation) have three states "Pending", "Complete"
// and "Failed" to mark the status of the replication of "DELETE" operation. All failed operations can
// then be retried by healing. In the case of permanent deletes, until the replication is completed on the
// target cluster, the object version is marked deleted on the source and hidden from listing. It is permanently
// deleted from the source when the VersionPurgeStatus changes to "Complete", i.e. after replication succeeds
// on target.
func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) {
	bucket := dobj.Bucket
	versionID := dobj.DeleteMarkerVersionID
	if versionID == "" {
		versionID = dobj.VersionID
	}

	rcfg, err := getReplicationConfig(ctx, bucket)
	if err != nil || rcfg == nil {
		logger.LogIf(ctx, err)
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: event.ObjectReplicationNotTracked,
		})
		return
	}

	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
	if tgt == nil {
		logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: event.ObjectReplicationNotTracked,
		})
		return
	}

	rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
		VersionID: versionID,
		Internal: miniogo.AdvancedRemoveOptions{
			ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
			ReplicationMTime:        dobj.DeleteMarkerMTime.Time,
			ReplicationStatus:       miniogo.ReplicationStatusReplica,
		},
	})

	replicationStatus := dobj.DeleteMarkerReplicationStatus
	versionPurgeStatus := dobj.VersionPurgeStatus

	if rmErr != nil {
		if dobj.VersionID == "" {
			replicationStatus = string(replication.Failed)
		} else {
			versionPurgeStatus = Failed
		}
		logger.LogIf(ctx, fmt.Errorf("Unable to replicate delete marker to %s/%s(%s): %s", rcfg.GetDestination().Bucket, dobj.ObjectName, versionID, rmErr))
	} else {
		if dobj.VersionID == "" {
			replicationStatus = string(replication.Completed)
		} else {
			versionPurgeStatus = Complete
		}
	}

	var eventName = event.ObjectReplicationComplete
	if replicationStatus == string(replication.Failed) || versionPurgeStatus == Failed {
		eventName = event.ObjectReplicationFailed
	}

	// Update metadata on the delete marker or purge permanent delete if replication success.
	dobjInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
		VersionID:                     versionID,
		DeleteMarkerReplicationStatus: replicationStatus,
		VersionPurgeStatus:            versionPurgeStatus,
		Versioned:                     globalBucketVersioningSys.Enabled(bucket),
		VersionSuspended:              globalBucketVersioningSys.Suspended(bucket),
	})
	if err != nil && !isErrVersionNotFound(err) { // VersionNotFound would be reported by pool that object version is missing on.
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %s", bucket, dobj.ObjectName, versionID, err))
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: eventName,
		})
	} else {
		sendEvent(eventArgs{
			BucketName: bucket,
			Object:     dobjInfo,
			Host:       "Internal: [Replication]",
			EventName:  eventName,
		})
	}
}

func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string {
	meta := make(map[string]string, len(oi.UserDefined))
	for k, v := range oi.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			continue
		}

		if equals(k, xhttp.AmzBucketReplicationStatus) {
			continue
		}

		// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
		if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
			continue
		}

		meta[k] = v
	}

	if oi.ContentEncoding != "" {
		meta[xhttp.ContentEncoding] = oi.ContentEncoding
	}

	if oi.ContentType != "" {
		meta[xhttp.ContentType] = oi.ContentType
	}

	if oi.UserTags != "" {
		meta[xhttp.AmzObjectTagging] = oi.UserTags
		meta[xhttp.AmzTagDirective] = "REPLACE"
	}

	sc := dest.StorageClass
	if sc == "" {
		sc = oi.StorageClass
	}
	if sc != "" {
		meta[xhttp.AmzStorageClass] = sc
	}
	meta[xhttp.MinIOSourceETag] = oi.ETag
	meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
	meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
	return meta
}

type caseInsensitiveMap map[string]string

// Lookup map entry case insensitively.
func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
	if len(m) == 0 {
		return "", false
	}
	for _, k := range []string{
		key,
		strings.ToLower(key),
		http.CanonicalHeaderKey(key),
	} {
		v, ok := m[k]
		if ok {
			return v, ok
		}
	}
	return "", false
}
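
For example, a Content-Language value stored lower-cased is still found when looked up by its canonical name (illustrative):

func exampleCaseInsensitiveLookup() {
	m := caseInsensitiveMap{"content-language": "en-US"}
	if v, ok := m.Lookup(xhttp.ContentLanguage); ok {
		fmt.Println(v) // "en-US": matched via the lower-cased fallback
	}
}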

func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
	meta := make(map[string]string)
	for k, v := range objInfo.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			continue
		}
		if isStandardHeader(k) {
			continue
		}
		meta[k] = v
	}

	sc := dest.StorageClass
	if sc == "" {
		sc = objInfo.StorageClass
	}
	putOpts = miniogo.PutObjectOptions{
		UserMetadata:    meta,
		ContentType:     objInfo.ContentType,
		ContentEncoding: objInfo.ContentEncoding,
		StorageClass:    sc,
		Internal: miniogo.AdvancedPutOptions{
			SourceVersionID:   objInfo.VersionID,
			ReplicationStatus: miniogo.ReplicationStatusReplica,
			SourceMTime:       objInfo.ModTime,
			SourceETag:        objInfo.ETag,
		},
	}
	if objInfo.UserTags != "" {
		tag, _ := tags.ParseObjectTags(objInfo.UserTags)
		if tag != nil {
			putOpts.UserTags = tag.ToMap()
		}
	}

	lkMap := caseInsensitiveMap(objInfo.UserDefined)
	if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
		putOpts.ContentLanguage = lang
	}
	if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
		putOpts.ContentDisposition = disp
	}
	if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
		putOpts.CacheControl = cc
	}
	if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
		rmode := miniogo.RetentionMode(mode)
		putOpts.Mode = rmode
	}
	if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
		rdate, err := time.Parse(time.RFC3339, retainDateStr)
		if err != nil {
			return putOpts, err
		}
		putOpts.RetainUntilDate = rdate
	}
	if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
		putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
	}
	if crypto.S3.IsEncrypted(objInfo.UserDefined) {
		putOpts.ServerSideEncryption = encrypt.NewSSE()
	}
	return
}

type replicationAction string

const (
	replicateMetadata replicationAction = "metadata"
	replicateNone     replicationAction = "none"
	replicateAll      replicationAction = "all"
)

// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
	for _, k2 := range keys {
		if strings.ToLower(k1) == strings.ToLower(k2) {
			return true
		}
	}
	return false
}
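
The helper is a case-insensitive membership test; an allocation-free variant using strings.EqualFold would behave the same for these ASCII header names (an illustrative alternative, not what this changeset uses):

func equalsFold(k1 string, keys ...string) bool {
	for _, k2 := range keys {
		if strings.EqualFold(k1, k2) { // no temporary lower-cased copies
			return true
		}
	}
	return false
}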

// returns replicationAction by comparing metadata between source and target
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
	// needs full replication
	if oi1.ETag != oi2.ETag ||
		oi1.VersionID != oi2.VersionID ||
		oi1.Size != oi2.Size ||
		oi1.DeleteMarker != oi2.IsDeleteMarker ||
		oi1.ModTime.Unix() != oi2.LastModified.Unix() {
		return replicateAll
	}

	if oi1.ContentType != oi2.ContentType {
		return replicateMetadata
	}

	if oi1.ContentEncoding != "" {
		enc, ok := oi2.Metadata[xhttp.ContentEncoding]
		if !ok {
			enc, ok = oi2.Metadata[strings.ToLower(xhttp.ContentEncoding)]
			if !ok {
				return replicateMetadata
			}
		}
		if strings.Join(enc, ",") != oi1.ContentEncoding {
			return replicateMetadata
		}
	}

	t, _ := tags.ParseObjectTags(oi1.UserTags)
	if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) {
		return replicateMetadata
	}

	// Compare only necessary headers
	compareKeys := []string{
		"Expires",
		"Cache-Control",
		"Content-Language",
		"Content-Disposition",
		"X-Amz-Object-Lock-Mode",
		"X-Amz-Object-Lock-Retain-Until-Date",
		"X-Amz-Object-Lock-Legal-Hold",
		"X-Amz-Website-Redirect-Location",
		"X-Amz-Meta-",
	}

	// compare metadata on both maps to see if meta is identical
	compareMeta1 := make(map[string]string)
	for k, v := range oi1.UserDefined {
		var found bool
		for _, prefix := range compareKeys {
			if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
				continue
			}
			found = true
			break
		}
		if found {
			compareMeta1[strings.ToLower(k)] = v
		}
	}

	compareMeta2 := make(map[string]string)
	for k, v := range oi2.Metadata {
		var found bool
		for _, prefix := range compareKeys {
			if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
				continue
			}
			found = true
			break
		}
		if found {
			compareMeta2[strings.ToLower(k)] = strings.Join(v, ",")
		}
	}

	if !reflect.DeepEqual(compareMeta1, compareMeta2) {
		return replicateMetadata
	}

	return replicateNone
}
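
The decision above reads as a three-way switch; for example, an object whose bytes are unchanged but whose tags were edited resolves to replicateMetadata, so only a server-side copy is needed (illustrative sketch):

func exampleActOnReplicationAction(src ObjectInfo, dst minio.ObjectInfo) {
	switch getReplicationAction(src, dst) {
	case replicateAll:
		// content differs (ETag/size/version/mod-time): re-upload the object
	case replicateMetadata:
		// same content, different tags or headers: metadata-only copy
	case replicateNone:
		// already in sync: nothing to do
	}
}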

// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
	z, ok := objectAPI.(*erasureServerPools)
	if !ok {
		return
	}

	bucket := objInfo.Bucket
	object := objInfo.Name

	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil {
		logger.LogIf(ctx, err)
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
	if tgt == nil {
		logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}
	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
		VersionID: objInfo.VersionID,
	})
	if err != nil {
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		logger.LogIf(ctx, err)
		return
	}
	defer gr.Close() // hold read lock for entire transaction

	objInfo = gr.ObjInfo
	size, err := objInfo.GetActualSize()
	if err != nil {
		logger.LogIf(ctx, err)
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}

	dest := cfg.GetDestination()
	if dest.Bucket == "" {
		logger.LogIf(ctx, fmt.Errorf("Unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}

	rtype := replicateAll
	oi, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{
		VersionID: objInfo.VersionID,
		Internal: miniogo.AdvancedGetOptions{
			ReplicationProxyRequest: "false",
		}})
	if err == nil {
		rtype = getReplicationAction(objInfo, oi)
		if rtype == replicateNone {
			// object with same VersionID already exists, replication kicked off by
			// PutObject might have completed.
			return
		}
	}
	replicationStatus := replication.Completed
	// use core client to avoid doing multipart on PUT
	c := &miniogo.Core{Client: tgt.Client}
	if rtype != replicateAll {
		// replicate metadata for object tagging/copy with metadata replacement
		srcOpts := miniogo.CopySrcOptions{
			Bucket:    dest.Bucket,
			Object:    object,
			VersionID: objInfo.VersionID}
		dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
		if _, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), srcOpts, dstOpts); err != nil {
			replicationStatus = replication.Failed
			logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
		}
	} else {
		target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
			sendEvent(eventArgs{
				EventName:  event.ObjectReplicationNotTracked,
				BucketName: bucket,
				Object:     objInfo,
				Host:       "Internal: [Replication]",
			})
			return
		}

		putOpts, err := putReplicationOpts(ctx, dest, objInfo)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%w", bucket, cfg.RoleArn, err))
			sendEvent(eventArgs{
				EventName:  event.ObjectReplicationNotTracked,
				BucketName: bucket,
				Object:     objInfo,
				Host:       "Internal: [Replication]",
			})
			return
		}

		// Setup bandwidth throttling
		peers, _ := globalEndpoints.peers()
		totalNodesCount := len(peers)
		if totalNodesCount == 0 {
			totalNodesCount = 1 // For standalone erasure coding
		}
		b := target.BandwidthLimit / int64(totalNodesCount)
		var headerSize int
		for k, v := range putOpts.Header() {
			headerSize += len(k) + len(v)
		}

		// r takes over closing gr.
		r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
		if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
			replicationStatus = replication.Failed
			logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
		}
		defer r.Close()
	}

	objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
	if objInfo.UserTags != "" {
		objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
	}

	// FIXME: add support for missing replication events
	// - event.ObjectReplicationMissedThreshold
	// - event.ObjectReplicationReplicatedAfterThreshold
	var eventName = event.ObjectReplicationComplete
	if replicationStatus == replication.Failed {
		eventName = event.ObjectReplicationFailed
	}

	// This lower level implementation is necessary to avoid write locks from CopyObject.
	poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
	} else {
		if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
			VersionID: objInfo.VersionID,
		}); err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
		}
	}
	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: bucket,
		Object:     objInfo,
		Host:       "Internal: [Replication]",
	})
}

// filterReplicationStatusMetadata filters replication status metadata for COPY
func filterReplicationStatusMetadata(metadata map[string]string) map[string]string {
	// Copy on write
	dst := metadata
	var copied bool
	delKey := func(key string) {
		if _, ok := metadata[key]; !ok {
			return
		}
		if !copied {
			dst = make(map[string]string, len(metadata))
			for k, v := range metadata {
				dst[k] = v
			}
			copied = true
		}
		delete(dst, key)
	}

	delKey(xhttp.AmzBucketReplicationStatus)
	return dst
}
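
Because of the copy-on-write above, callers whose metadata lacks the replication-status key get the original map back with no allocation; a quick illustrative check (prints true when no copy was made):

func exampleCopyOnWrite() {
	in := map[string]string{"X-Amz-Meta-Foo": "bar"}
	out := filterReplicationStatusMetadata(in)
	fmt.Println(reflect.ValueOf(out).Pointer() == reflect.ValueOf(in).Pointer()) // true: same map returned
}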

// DeletedObjectVersionInfo has info on deleted object
type DeletedObjectVersionInfo struct {
	DeletedObject
	Bucket string
}

type replicationState struct {
	// add future metrics here
	replicaCh       chan ObjectInfo
	replicaDeleteCh chan DeletedObjectVersionInfo
}

func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
	if r == nil {
		return
	}
	select {
	case r.replicaCh <- oi:
	default:
	}
}

func (r *replicationState) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
	if r == nil {
		return
	}
	select {
	case r.replicaDeleteCh <- doi:
	default:
	}
}

var (
	globalReplicationState *replicationState
)

func newReplicationState() *replicationState {
	rs := &replicationState{
		replicaCh:       make(chan ObjectInfo, 10000),
		replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
	}
	go func() {
		<-GlobalContext.Done()
		close(rs.replicaCh)
		close(rs.replicaDeleteCh)
	}()
	return rs
}
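
Note the queue semantics above: the select with an empty default never blocks, so once the 10000-slot buffer is full, new tasks are silently dropped and left to a later heal/resync pass. In sketch form:

func exampleQueueSemantics(rs *replicationState, oi ObjectInfo) {
	rs.queueReplicaTask(oi)
	// If replicaCh had a free slot, a worker will pick oi up; if the buffer
	// was full, the call returned immediately and oi was not enqueued.
}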

// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
	// Add a new worker.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case oi, ok := <-r.replicaCh:
				if !ok {
					return
				}
				replicateObject(ctx, oi, objectAPI)
			case doi, ok := <-r.replicaDeleteCh:
				if !ok {
					return
				}
				replicateDelete(ctx, doi, objectAPI)
			}
		}
	}()
}

func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
	if globalReplicationState == nil {
		return
	}

	// Start replication workers per count set in api config or MINIO_API_REPLICATION_WORKERS.
	for i := 0; i < globalAPIConfig.getReplicationWorkers(); i++ {
		globalReplicationState.addWorker(ctx, objectAPI)
	}
}

// get Reader from replication target if active-active replication is in place and
// this node returns a 404
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, proxy bool) {
	tgt, oi, proxy, err := proxyHeadToRepTarget(ctx, bucket, object, opts)
	if !proxy || err != nil {
		return nil, false
	}
	fn, off, length, err := NewGetObjectReader(rs, oi, opts)
	if err != nil {
		return nil, false
	}
	gopts := miniogo.GetObjectOptions{
		VersionID:            opts.VersionID,
		ServerSideEncryption: opts.ServerSideEncryption,
		Internal: miniogo.AdvancedGetOptions{
			ReplicationProxyRequest: "true",
		},
	}
	// get correct offsets for encrypted object
	if off >= 0 && length >= 0 {
		if err := gopts.SetRange(off, off+length-1); err != nil {
			return nil, false
		}
	}
	// Make sure to match ETag when proxying.
	if err = gopts.SetMatchETag(oi.ETag); err != nil {
		return nil, false
	}
	c := miniogo.Core{Client: tgt.Client}
	obj, _, _, err := c.GetObject(ctx, bucket, object, gopts)
	if err != nil {
		return nil, false
	}
	closeReader := func() { obj.Close() }

	reader, err := fn(obj, h, opts.CheckPrecondFn, closeReader)
	if err != nil {
		return nil, false
	}
	reader.ObjInfo = oi.Clone()
	return reader, true
}

// isProxyable returns true if replication config found for this bucket
func isProxyable(ctx context.Context, bucket string) bool {
	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil {
		return false
	}
	dest := cfg.GetDestination()
	return dest.Bucket == bucket
}
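
A sketch of how a GET handler might fall back to the remote site on a local 404; the wiring is assumed, and the nil range spec means "whole object":

func exampleProxyGetFallback(ctx context.Context, bucket, object string, opts ObjectOptions) (*GetObjectReader, bool) {
	if !isProxyable(ctx, bucket) {
		return nil, false
	}
	// proxyHeadToRepTarget (called inside) refuses requests that already
	// carry the proxy header, which prevents proxy loops between sites.
	return proxyGetToReplicationTarget(ctx, bucket, object, nil, http.Header{}, opts)
}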

func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts ObjectOptions) (tgt *TargetClient, oi ObjectInfo, proxy bool, err error) {
	// this option is set when active-active replication is in place between site A -> B,
	// and site B does not have the object yet.
	if opts.ProxyRequest || (opts.ProxyHeaderSet && !opts.ProxyRequest) { // true only when site B sets MinIOSourceProxyRequest header
		return nil, oi, false, nil
	}
	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil {
		return nil, oi, false, err
	}
	dest := cfg.GetDestination()
	if dest.Bucket != bucket { // not active-active
		return nil, oi, false, err
	}
	ssec := false
	if opts.ServerSideEncryption != nil {
		ssec = opts.ServerSideEncryption.Type() == encrypt.SSEC
	}
	ropts := replication.ObjectOpts{
		Name: object,
		SSEC: ssec,
	}
	if !cfg.Replicate(ropts) { // no matching rule for object prefix
		return nil, oi, false, nil
	}
	tgt = globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
	if tgt == nil || tgt.isOffline() {
		return nil, oi, false, fmt.Errorf("target is offline or not configured")
	}

	gopts := miniogo.GetObjectOptions{
		VersionID:            opts.VersionID,
		ServerSideEncryption: opts.ServerSideEncryption,
		Internal: miniogo.AdvancedGetOptions{
			ReplicationProxyRequest: "true",
		},
	}

	objInfo, err := tgt.StatObject(ctx, dest.Bucket, object, gopts)
	if err != nil {
		return nil, oi, false, err
	}

	tags, _ := tags.MapToObjectTags(objInfo.UserTags)
	oi = ObjectInfo{
		Bucket:            bucket,
		Name:              object,
		ModTime:           objInfo.LastModified,
		Size:              objInfo.Size,
		ETag:              objInfo.ETag,
		VersionID:         objInfo.VersionID,
		IsLatest:          objInfo.IsLatest,
		DeleteMarker:      objInfo.IsDeleteMarker,
		ContentType:       objInfo.ContentType,
		Expires:           objInfo.Expires,
		StorageClass:      objInfo.StorageClass,
		ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus),
		UserTags:          tags.String(),
	}
	oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
	for k, v := range objInfo.Metadata {
		oi.UserDefined[k] = v[0]
	}
	ce, ok := oi.UserDefined[xhttp.ContentEncoding]
	if !ok {
		ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
	}
	if ok {
		oi.ContentEncoding = ce
	}
	return tgt, oi, true, nil
}

// get object info from replication target if active-active replication is in place and
// this node returns a 404
func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, proxy bool, err error) {
	_, oi, proxy, err = proxyHeadToRepTarget(ctx, bucket, object, opts)
	return oi, proxy, err
}

func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, sync bool) {
	if sync {
		replicateObject(ctx, objInfo, o)
	} else {
		globalReplicationState.queueReplicaTask(objInfo)
	}
}

func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo, o ObjectLayer, sync bool) {
	if sync {
		replicateDelete(ctx, dv, o)
	} else {
		globalReplicationState.queueReplicaDeleteTask(dv)
	}
}

471	cmd/bucket-targets.go	(Normal file)
@@ -0,0 +1,471 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"net/http"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	minio "github.com/minio/minio-go/v7"
	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/pkg/bucket/versioning"
	"github.com/minio/minio/pkg/madmin"
)

const (
	defaultHealthCheckDuration = 100 * time.Second
)

// BucketTargetSys represents bucket targets subsystem
type BucketTargetSys struct {
	sync.RWMutex
	arnRemotesMap map[string]*TargetClient
	targetsMap    map[string][]madmin.BucketTarget
}

// ListTargets lists bucket targets across tenant or for individual bucket, and returns
// results filtered by arnType
func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType string) (targets []madmin.BucketTarget) {
	if bucket != "" {
		if ts, err := sys.ListBucketTargets(ctx, bucket); err == nil {
			for _, t := range ts.Targets {
				if string(t.Type) == arnType || arnType == "" {
					targets = append(targets, t.Clone())
				}
			}
		}
		return targets
	}
	sys.RLock()
	defer sys.RUnlock()
	for _, tgts := range sys.targetsMap {
		for _, t := range tgts {
			if string(t.Type) == arnType || arnType == "" {
				targets = append(targets, t.Clone())
			}
		}
	}
	return
}

// ListBucketTargets - gets list of bucket targets for this bucket.
func (sys *BucketTargetSys) ListBucketTargets(ctx context.Context, bucket string) (*madmin.BucketTargets, error) {
	sys.RLock()
	defer sys.RUnlock()

	tgts, ok := sys.targetsMap[bucket]
	if ok {
		return &madmin.BucketTargets{Targets: tgts}, nil
	}
	return nil, BucketRemoteTargetNotFound{Bucket: bucket}
}

// SetTarget - sets a new minio-go client target for this bucket.
func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *madmin.BucketTarget, update bool) error {
	if globalIsGateway {
		return nil
	}
	if !tgt.Type.IsValid() && !update {
		return BucketRemoteArnTypeInvalid{Bucket: bucket}
	}
	clnt, err := sys.getRemoteTargetClient(tgt)
	if err != nil {
		return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
	}
	// validate if target credentials are ok
	if _, err = clnt.BucketExists(ctx, tgt.TargetBucket); err != nil {
		if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
			return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
		}
		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
	}
	if tgt.Type == madmin.ReplicationService {
		if !globalIsErasure {
			return NotImplemented{}
		}
		if !globalBucketVersioningSys.Enabled(bucket) {
			return BucketReplicationSourceNotVersioned{Bucket: bucket}
		}
		vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
		if err != nil {
			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
		}
		if vcfg.Status != string(versioning.Enabled) {
			return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
		}
	}
	if tgt.Type == madmin.ILMService {
		if globalBucketVersioningSys.Enabled(bucket) {
			vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
			if err != nil {
				if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
					return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
				}
				return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
			}
			if vcfg.Status != string(versioning.Enabled) {
				return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
			}
		}
	}
	sys.Lock()
	defer sys.Unlock()

	tgts := sys.targetsMap[bucket]
	newtgts := make([]madmin.BucketTarget, len(tgts))
	labels := make(map[string]struct{}, len(tgts))
	found := false
	for idx, t := range tgts {
		labels[t.Label] = struct{}{}
		if t.Type == tgt.Type {
			if t.Arn == tgt.Arn && !update {
				return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
			}
			if t.Label == tgt.Label && !update {
				return BucketRemoteLabelInUse{Bucket: t.TargetBucket}
			}
			newtgts[idx] = *tgt
			found = true
			continue
		}
		newtgts[idx] = t
	}
	if _, ok := labels[tgt.Label]; ok && !update {
		return BucketRemoteLabelInUse{Bucket: tgt.TargetBucket}
	}
	if !found && !update {
		newtgts = append(newtgts, *tgt)
	}

	sys.targetsMap[bucket] = newtgts
	sys.arnRemotesMap[tgt.Arn] = clnt
	return nil
}
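
Usage sketch for the setter above (illustrative only; the bucket name is a placeholder and tgt is assumed to arrive pre-filled from the admin API layer):

func exampleRegisterTarget(ctx context.Context, sys *BucketTargetSys, tgt *madmin.BucketTarget) error {
	// update=false registers a brand-new target; passing true instead
	// edits an existing target in place, skipping the duplicate checks.
	return sys.SetTarget(ctx, "srcbucket", tgt, false)
}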

// RemoveTarget - removes a remote bucket target for this source bucket.
func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr string) error {
	if globalIsGateway {
		return nil
	}
	if arnStr == "" {
		return BucketRemoteArnInvalid{Bucket: bucket}
	}
	arn, err := madmin.ParseARN(arnStr)
	if err != nil {
		return BucketRemoteArnInvalid{Bucket: bucket}
	}
	if arn.Type == madmin.ReplicationService {
		if !globalIsErasure {
			return NotImplemented{}
		}
		// reject removal of remote target if replication configuration is present
		rcfg, err := getReplicationConfig(ctx, bucket)
		if err == nil && rcfg.RoleArn == arnStr {
			if _, ok := sys.arnRemotesMap[arnStr]; ok {
				return BucketRemoteRemoveDisallowed{Bucket: bucket}
			}
		}
	}
	if arn.Type == madmin.ILMService {
		// reject removal of remote target if lifecycle transition uses this arn
		config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
		if err == nil && transitionSCInUse(ctx, config, bucket, arnStr) {
			if _, ok := sys.arnRemotesMap[arnStr]; ok {
				return BucketRemoteRemoveDisallowed{Bucket: bucket}
			}
		}
	}

	// delete ARN type from list of matching targets
	sys.Lock()
	defer sys.Unlock()
	found := false
	tgts, ok := sys.targetsMap[bucket]
	if !ok {
		return BucketRemoteTargetNotFound{Bucket: bucket}
	}
	targets := make([]madmin.BucketTarget, 0, len(tgts))
	for _, tgt := range tgts {
		if tgt.Arn != arnStr {
			targets = append(targets, tgt)
			continue
		}
		found = true
	}
	if !found {
		return BucketRemoteTargetNotFound{Bucket: bucket}
	}
	sys.targetsMap[bucket] = targets
	delete(sys.arnRemotesMap, arnStr)
	return nil
}

// GetRemoteTargetClient returns minio-go client for replication target instance
func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *TargetClient {
	sys.RLock()
	defer sys.RUnlock()
	return sys.arnRemotesMap[arn]
}

// GetRemoteTargetWithLabel returns bucket target given a target label
func (sys *BucketTargetSys) GetRemoteTargetWithLabel(ctx context.Context, bucket, targetLabel string) *madmin.BucketTarget {
	sys.RLock()
	defer sys.RUnlock()
	for _, t := range sys.targetsMap[bucket] {
		if strings.ToUpper(t.Label) == strings.ToUpper(targetLabel) {
			tgt := t.Clone()
			return &tgt
		}
	}
	return nil
}

// GetRemoteArnWithLabel returns bucket target's ARN given its target label
func (sys *BucketTargetSys) GetRemoteArnWithLabel(ctx context.Context, bucket, tgtLabel string) *madmin.ARN {
	tgt := sys.GetRemoteTargetWithLabel(ctx, bucket, tgtLabel)
	if tgt == nil {
		return nil
	}
	arn, err := madmin.ParseARN(tgt.Arn)
	if err != nil {
		return nil
	}
	return arn
}

// GetRemoteLabelWithArn returns a bucket target's label given its ARN
func (sys *BucketTargetSys) GetRemoteLabelWithArn(ctx context.Context, bucket, arnStr string) string {
	sys.RLock()
	defer sys.RUnlock()
	for _, t := range sys.targetsMap[bucket] {
		if t.Arn == arnStr {
			return t.Label
		}
	}
	return ""
}

// NewBucketTargetSys - creates new replication system.
func NewBucketTargetSys() *BucketTargetSys {
	return &BucketTargetSys{
		arnRemotesMap: make(map[string]*TargetClient),
		targetsMap:    make(map[string][]madmin.BucketTarget),
	}
}

// Init initializes the bucket targets subsystem for buckets which have targets configured.
func (sys *BucketTargetSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
	if objAPI == nil {
		return errServerNotInitialized
	}

	// In gateway mode, bucket targets is not supported.
	if globalIsGateway {
		return nil
	}

	// Load bucket targets once during boot in background.
	go sys.load(ctx, buckets, objAPI)
	return nil
}

// UpdateAllTargets updates target to reflect metadata updates
func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketTargets) {
	if sys == nil {
		return
	}
	sys.Lock()
	defer sys.Unlock()
	if tgts == nil || tgts.Empty() {
		// remove target and arn association
		if tgts, ok := sys.targetsMap[bucket]; ok {
			for _, t := range tgts {
				delete(sys.arnRemotesMap, t.Arn)
			}
		}
		delete(sys.targetsMap, bucket)
		return
	}

	if len(tgts.Targets) > 0 {
		sys.targetsMap[bucket] = tgts.Targets
	}
	for _, tgt := range tgts.Targets {
		tgtClient, err := sys.getRemoteTargetClient(&tgt)
		if err != nil {
			continue
		}
		sys.arnRemotesMap[tgt.Arn] = tgtClient
	}
	sys.targetsMap[bucket] = tgts.Targets
}

// create minio-go clients for buckets having remote targets
func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
	for _, bucket := range buckets {
		cfg, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket.Name)
		if err != nil {
			continue
		}
		if cfg == nil || cfg.Empty() {
			continue
		}
		if len(cfg.Targets) > 0 {
			sys.targetsMap[bucket.Name] = cfg.Targets
		}
		for _, tgt := range cfg.Targets {
			tgtClient, err := sys.getRemoteTargetClient(&tgt)
			if err != nil {
				continue
			}
			sys.arnRemotesMap[tgt.Arn] = tgtClient
		}
		sys.targetsMap[bucket.Name] = cfg.Targets
	}
}

// getRemoteTargetInstanceTransport contains a singleton roundtripper.
var getRemoteTargetInstanceTransport http.RoundTripper
var getRemoteTargetInstanceTransportOnce sync.Once

// Returns a minio-go Client configured to access remote host described in replication target config.
func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*TargetClient, error) {
	config := tcfg.Credentials
	creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")

	getRemoteTargetInstanceTransportOnce.Do(func() {
		getRemoteTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute)
	})
	api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
		Creds:     creds,
		Secure:    tcfg.Secure,
		Region:    tcfg.Region,
		Transport: getRemoteTargetInstanceTransport,
	})
	if err != nil {
		return nil, err
	}
	hcDuration := defaultHealthCheckDuration
	if tcfg.HealthCheckDuration >= 1 { // require minimum health check duration of 1 sec.
		hcDuration = tcfg.HealthCheckDuration
	}
	tc := &TargetClient{
		Client:              api,
		healthCheckDuration: hcDuration,
		bucket:              tcfg.TargetBucket,
		replicateSync:       tcfg.ReplicationSync,
	}
	go tc.healthCheck()
	return tc, nil
}

// getRemoteARN gets existing ARN for an endpoint or generates a new one.
func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget) string {
	if target == nil {
		return ""
	}
	tgts := sys.targetsMap[bucket]
	for _, tgt := range tgts {
		if tgt.Type == target.Type && tgt.TargetBucket == target.TargetBucket && target.URL().String() == tgt.URL().String() {
			return tgt.Arn
		}
	}
	if !madmin.ServiceType(target.Type).IsValid() {
		return ""
	}
	return generateARN(target)
}

// generate ARN that is unique to this target type
func generateARN(t *madmin.BucketTarget) string {
	hash := sha256.New()
	hash.Write([]byte(t.Type))
	hash.Write([]byte(t.Region))
	hash.Write([]byte(t.TargetBucket))
	hashSum := hex.EncodeToString(hash.Sum(nil))
	arn := madmin.ARN{
		Type:   t.Type,
		ID:     hashSum,
		Region: t.Region,
		Bucket: t.TargetBucket,
	}
	return arn.String()
}
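
Since the ARN is derived from a SHA-256 over the target's type, region, and bucket, the same logical target always maps to the same ARN; that is what lets getRemoteARN above reuse an existing entry instead of minting a new one. A tiny illustrative check:

func exampleARNIsDeterministic(t *madmin.BucketTarget) bool {
	// Always true for a given target: the derivation has no random input.
	return generateARN(t) == generateARN(t)
}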

// Returns parsed target config. If KMS is configured, remote target is decrypted
func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.BucketTargets, error) {
	var (
		data []byte
		err  error
		t    madmin.BucketTargets
		meta map[string]string
	)
	if len(cdata) == 0 {
		return nil, nil
	}
	data = cdata
	if len(cmetadata) != 0 {
		if err := json.Unmarshal(cmetadata, &meta); err != nil {
			return nil, err
		}
		if crypto.S3.IsEncrypted(meta) {
			if data, err = decryptBucketMetadata(cdata, bucket, meta, crypto.Context{bucket: bucket, bucketTargetsFile: bucketTargetsFile}); err != nil {
				return nil, err
			}
		}
	}

	if err = json.Unmarshal(data, &t); err != nil {
		return nil, err
	}
	return &t, nil
}

// TargetClient is the struct for remote target client.
type TargetClient struct {
	*miniogo.Client
	up                  int32
	healthCheckDuration time.Duration
	bucket              string // remote bucket target
	replicateSync       bool
}

func (tc *TargetClient) isOffline() bool {
	return atomic.LoadInt32(&tc.up) == 0
}

func (tc *TargetClient) healthCheck() {
	for {
		_, err := tc.BucketExists(GlobalContext, tc.bucket)
		if err != nil {
			atomic.StoreInt32(&tc.up, 0)
			time.Sleep(tc.healthCheckDuration)
			continue
		}
		atomic.StoreInt32(&tc.up, 1)
		time.Sleep(tc.healthCheckDuration)
	}
}
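
The probe above flips tc.up between 0 and 1 every healthCheckDuration; callers such as proxyHeadToRepTarget consult isOffline() before issuing requests. A minimal consumer sketch:

func exampleUseTargetIfOnline(tc *TargetClient) {
	if tc == nil || tc.isOffline() {
		// Skip the remote call; retry after the next successful probe.
		return
	}
	// Safe to issue requests through the embedded minio-go client here.
}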

@@ -40,7 +40,7 @@ const (
 func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "PutBucketVersioning")
 
-	defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -70,6 +70,14 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
 		}, r.URL, guessIsBrowserReq(r))
 		return
 	}
+	if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
+		writeErrorResponse(ctx, w, APIError{
+			Code:           "InvalidBucketState",
+			Description:    "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
+			HTTPStatusCode: http.StatusConflict,
+		}, r.URL, guessIsBrowserReq(r))
+		return
+	}
 
 	configData, err := xml.Marshal(v)
 	if err != nil {
@@ -90,7 +98,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
 func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "GetBucketVersioning")
 
-	defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -51,6 +51,11 @@ func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, erro
 	return globalBucketMetadataSys.GetVersioningConfig(bucket)
 }
 
+// Reset BucketVersioningSys to initial state.
+func (sys *BucketVersioningSys) Reset() {
+	// There is currently no internal state.
+}
+
 // NewBucketVersioningSys - creates new versioning system.
 func NewBucketVersioningSys() *BucketVersioningSys {
 	return &BucketVersioningSys{}
@@ -17,38 +17,72 @@
 package cmd
 
 import (
     "context"
     "crypto/x509"
     "encoding/gob"
     "errors"
     "fmt"
     "math/rand"
     "net"
     "net/url"
     "os"
     "path/filepath"
     "runtime"
     "sort"
     "strings"
     "time"
 
     "github.com/fatih/color"
     dns2 "github.com/miekg/dns"
     "github.com/minio/cli"
-    "github.com/minio/minio-go/v6/pkg/set"
+    "github.com/minio/minio-go/v7/pkg/set"
     "github.com/minio/minio/cmd/config"
     xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/auth"
     "github.com/minio/minio/pkg/certs"
     "github.com/minio/minio/pkg/console"
     "github.com/minio/minio/pkg/env"
     "github.com/minio/minio/pkg/handlers"
 )
 
 // serverDebugLog will enable debug printing
 var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
 
 func init() {
     rand.Seed(time.Now().UTC().UnixNano())
 
     logger.Init(GOPATH, GOROOT)
     logger.RegisterError(config.FmtError)
 
     // Initialize globalConsoleSys system
     globalConsoleSys = NewConsoleLogger(GlobalContext)
     logger.AddTarget(globalConsoleSys)
     // Inject into config package.
     config.Logger.Info = logger.Info
     config.Logger.LogIf = logger.LogIf
 
     globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)
 
     initGlobalContext()
 
     globalForwarder = handlers.NewForwarder(&handlers.Forwarder{
         PassHost:     true,
         RoundTripper: newGatewayHTTPTransport(1 * time.Hour),
         Logger: func(err error) {
             if err != nil && !errors.Is(err, context.Canceled) {
                 logger.LogIf(GlobalContext, err)
             }
         },
     })
 
     globalReplicationState = newReplicationState()
     globalTransitionState = newTransitionState()
 
     console.SetColor("Debug", color.New())
 
     gob.Register(StorageErr(""))
 }
 
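`init()` registers `StorageErr` with encoding/gob so typed error values survive inter-node RPC encoding: gob can only encode a concrete type carried inside an interface if that type was registered first. A minimal standard-library sketch of why the registration matters:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// StorageErr mirrors the string-based error type registered in the diff's init().
type StorageErr string

func (e StorageErr) Error() string { return string(e) }

func init() {
	// Without this registration, encoding a StorageErr carried inside
	// an error interface would fail at runtime.
	gob.Register(StorageErr(""))
}

func main() {
	var buf bytes.Buffer
	var sent error = StorageErr("disk not found")

	// Encoding an interface value requires a pointer to the interface.
	if err := gob.NewEncoder(&buf).Encode(&sent); err != nil {
		panic(err)
	}

	var received error
	if err := gob.NewDecoder(&buf).Decode(&received); err != nil {
		panic(err)
	}
	fmt.Println(received) // disk not found
}
```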
 func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
-    if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
+    if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
         logger.Fatal(errInvalidArgument,
             "Encryption support is requested but '%s' does not support encryption", name)
     }
@@ -60,25 +94,50 @@ func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
         }
     }
 
+    globalCompressConfigMu.Lock()
     if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() {
         logger.Fatal(errInvalidArgument,
             "Compression support is requested but '%s' does not support compression", name)
     }
+    globalCompressConfigMu.Unlock()
 }
 
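The compression check now runs under `globalCompressConfigMu` because the config can be reloaded concurrently (for example by an admin API call); reading it unguarded would be a data race. A sketch of the lock-and-snapshot idiom with illustrative names, assuming a hot-reloadable shared config:

```go
package main

import (
	"fmt"
	"sync"
)

// compressConfig stands in for a globally shared, hot-reloadable config.
type compressConfig struct {
	Enabled    bool
	Extensions []string
}

var (
	configMu sync.Mutex
	config   compressConfig
)

// snapshot copies the config under the lock so callers can use it freely.
func snapshot() compressConfig {
	configMu.Lock()
	defer configMu.Unlock()
	return config
}

// reload simulates an admin-triggered config update.
func reload(c compressConfig) {
	configMu.Lock()
	defer configMu.Unlock()
	config = c
}

func main() {
	reload(compressConfig{Enabled: true, Extensions: []string{".txt"}})
	fmt.Println(snapshot().Enabled) // true
}
```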
 // Check for updates and print a notification message
 func checkUpdate(mode string) {
-    // Its OK to ignore any errors during doUpdate() here.
-    if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
-        if updateMsg == "" {
-            return
-        }
-        if globalInplaceUpdateDisabled {
-            logStartupMessage(updateMsg)
-        } else {
-            logStartupMessage(prepareUpdateMessage("Run `mc admin update`", latestReleaseTime.Sub(currentReleaseTime)))
-        }
+    updateURL := minioReleaseInfoURL
+    if runtime.GOOS == globalWindowsOSName {
+        updateURL = minioReleaseWindowsInfoURL
+    }
+
+    u, err := url.Parse(updateURL)
+    if err != nil {
+        return
+    }
+
+    // Its OK to ignore any errors during doUpdate() here.
+    crTime, err := GetCurrentReleaseTime()
+    if err != nil {
+        return
+    }
+
+    _, lrTime, err := getLatestReleaseTime(u, 2*time.Second, mode)
+    if err != nil {
+        return
+    }
+
+    var older time.Duration
+    var downloadURL string
+    if lrTime.After(crTime) {
+        older = lrTime.Sub(crTime)
+        downloadURL = getDownloadURL(releaseTimeToReleaseTag(lrTime))
+    }
+
+    updateMsg := prepareUpdateMessage(downloadURL, older)
+    if updateMsg == "" {
+        return
+    }
+
+    logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
 }
 
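The rewritten `checkUpdate` boils down to plain `time.Time` arithmetic: compute how much older the running build is than the latest release, and stay silent when nothing newer exists. A reduced sketch of that core logic using only the standard library; the message text is illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// updateMessage reports how far behind the current build is, or ""
// when the build is already up to date (mirrors the diff's logic).
func updateMessage(current, latest time.Time) string {
	if !latest.After(current) {
		return ""
	}
	older := latest.Sub(current)
	return fmt.Sprintf("You are running a release %s old. Run `mc admin update` to upgrade.",
		older.Round(time.Hour))
}

func main() {
	current := time.Date(2020, 11, 1, 0, 0, 0, 0, time.UTC)
	latest := time.Date(2020, 12, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(updateMessage(current, latest))
	fmt.Println(updateMessage(latest, latest) == "") // true
}
```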
 func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
@@ -200,6 +259,14 @@ func handleCommonEnvVars() {
             }
             globalDomainNames = append(globalDomainNames, domainName)
         }
+        sort.Strings(globalDomainNames)
+        lcpSuf := lcpSuffix(globalDomainNames)
+        for _, domainName := range globalDomainNames {
+            if domainName == lcpSuf && len(globalDomainNames) > 1 {
+                logger.Fatal(config.ErrOverlappingDomainValue(nil).Msg("Overlapping domains `%s` not allowed", globalDomainNames),
+                    "Invalid MINIO_DOMAIN value in environment variable")
+            }
+        }
     }
 
     publicIPs := env.Get(config.EnvPublicIPs, "")
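The overlap check sorts the configured domains and rejects the setup when one domain is itself the common suffix of the others (for example `example.com` alongside `s3.example.com`), since bucket DNS names would then be ambiguous. The `lcpSuffix` helper is not shown in this diff; the following is a plausible reconstruction of what such a longest-common-suffix function could look like, not MinIO's actual implementation:

```go
package main

import "fmt"

// longestCommonSuffix returns the longest suffix shared by all inputs.
func longestCommonSuffix(domains []string) string {
	if len(domains) == 0 {
		return ""
	}
	suffix := domains[0]
	for _, d := range domains[1:] {
		// Shrink the candidate from the left until it suffixes d.
		for len(suffix) > 0 {
			if len(d) >= len(suffix) && d[len(d)-len(suffix):] == suffix {
				break
			}
			suffix = suffix[1:]
		}
	}
	return suffix
}

func main() {
	domains := []string{"s3.example.com", "example.com"}
	suf := longestCommonSuffix(domains)
	for _, d := range domains {
		if d == suf && len(domains) > 1 {
			fmt.Printf("overlapping domains %v not allowed\n", domains)
		}
	}
}
```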
@@ -223,7 +290,11 @@ func handleCommonEnvVars() {
     } else {
         // Add found interfaces IP address to global domain IPS,
         // loopback addresses will be naturally dropped.
-        updateDomainIPs(mustGetLocalIP4())
+        domainIPs := mustGetLocalIP4()
+        for _, host := range globalEndpoints.Hostnames() {
+            domainIPs.Add(host)
+        }
+        updateDomainIPs(domainIPs)
     }
 
     // In place update is true by default if the MINIO_UPDATE is not set
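Instead of advertising only local interface IPs, the change also folds every configured endpoint hostname into the set handed to `updateDomainIPs`, so DNS entries cover the names peers actually dial. A map-based sketch of the set-union idea; the real code uses minio-go's `set.StringSet`, and the hostnames below are made up:

```go
package main

import "fmt"

// stringSet is a minimal stand-in for set.StringSet from minio-go.
type stringSet map[string]struct{}

func (s stringSet) Add(v string) { s[v] = struct{}{} }

func main() {
	// Pretend these came from mustGetLocalIP4().
	domainIPs := stringSet{"10.0.0.5": {}, "192.168.1.7": {}}

	// Fold in the hostnames of all configured endpoints, as the hunk does.
	for _, host := range []string{"minio-1.internal", "minio-2.internal"} {
		domainIPs.Add(host)
	}

	for v := range domainIPs {
		fmt.Println(v)
	}
}
```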
@@ -241,6 +312,16 @@ func handleCommonEnvVars() {
         globalConfigEncrypted = true
     }
 
+    if env.IsSet(config.EnvRootUser) || env.IsSet(config.EnvRootPassword) {
+        cred, err := auth.CreateCredentials(env.Get(config.EnvRootUser, ""), env.Get(config.EnvRootPassword, ""))
+        if err != nil {
+            logger.Fatal(config.ErrInvalidCredentials(err),
+                "Unable to validate credentials inherited from the shell environment")
+        }
+        globalActiveCred = cred
+        globalConfigEncrypted = true
+    }
+
     if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
         oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
         if err != nil {
@@ -251,6 +332,17 @@ func handleCommonEnvVars() {
         os.Unsetenv(config.EnvAccessKeyOld)
         os.Unsetenv(config.EnvSecretKeyOld)
     }
+
+    if env.IsSet(config.EnvRootUserOld) && env.IsSet(config.EnvRootPasswordOld) {
+        oldCred, err := auth.CreateCredentials(env.Get(config.EnvRootUserOld, ""), env.Get(config.EnvRootPasswordOld, ""))
+        if err != nil {
+            logger.Fatal(config.ErrInvalidCredentials(err),
+                "Unable to validate the old credentials inherited from the shell environment")
+        }
+        globalOldCred = oldCred
+        os.Unsetenv(config.EnvRootUserOld)
+        os.Unsetenv(config.EnvRootPasswordOld)
+    }
 }
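These two blocks accept root credentials from the environment and support rotation: the `*_OLD` pair is read once, validated, and immediately unset so the old secrets never leak to child processes. A sketch of that read-validate-unset flow using only the standard library; the variable names and the length rule are illustrative, while MinIO's real validation lives in its `auth` package:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

type credentials struct{ AccessKey, SecretKey string }

// newCredentials applies a toy validation rule as a stand-in for
// auth.CreateCredentials.
func newCredentials(access, secret string) (credentials, error) {
	if len(access) < 3 || len(secret) < 8 {
		return credentials{}, errors.New("credentials too short")
	}
	return credentials{access, secret}, nil
}

func main() {
	os.Setenv("ROOT_USER_OLD", "olduser")
	os.Setenv("ROOT_PASSWORD_OLD", "oldpassword")

	if os.Getenv("ROOT_USER_OLD") != "" && os.Getenv("ROOT_PASSWORD_OLD") != "" {
		oldCred, err := newCredentials(os.Getenv("ROOT_USER_OLD"), os.Getenv("ROOT_PASSWORD_OLD"))
		if err != nil {
			fmt.Println("invalid old credentials:", err)
			return
		}
		// Unset immediately so the secrets don't propagate further,
		// mirroring the os.Unsetenv calls in the hunk.
		os.Unsetenv("ROOT_USER_OLD")
		os.Unsetenv("ROOT_PASSWORD_OLD")
		fmt.Println("will re-encrypt config for", oldCred.AccessKey)
	}
}
```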
 func logStartupMessage(msg string) {
@@ -260,7 +352,7 @@ func logStartupMessage(msg string) {
     logger.StartupMessage(msg)
 }
 
-func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
+func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
     if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
         return nil, nil, false, nil
     }
@@ -269,11 +361,71 @@ func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn b
         return nil, nil, false, err
     }
 
-    c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
+    manager, err = certs.NewManager(GlobalContext, getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
     if err != nil {
         return nil, nil, false, err
     }
+
+    // MinIO has support for multiple certificates. It expects the following structure:
+    //  certs/
+    //   │
+    //   ├─ public.crt
+    //   ├─ private.key
+    //   │
+    //   ├─ example.com/
+    //   │   │
+    //   │   ├─ public.crt
+    //   │   └─ private.key
+    //   └─ foobar.org/
+    //      │
+    //      ├─ public.crt
+    //      └─ private.key
+    //   ...
+    //
+    // Therefore, we read all filenames in the cert directory and check
+    // for each directory whether it contains a public.crt and private.key.
+    // If so, we try to add it to certificate manager.
+    root, err := os.Open(globalCertsDir.Get())
+    if err != nil {
+        return nil, nil, false, err
+    }
+    defer root.Close()
+
+    files, err := root.Readdir(-1)
+    if err != nil {
+        return nil, nil, false, err
+    }
+    for _, file := range files {
+        // Ignore all
+        // - regular files
+        // - "CAs" directory
+        // - any directory which starts with ".."
+        if file.Mode().IsRegular() || file.Name() == "CAs" || strings.HasPrefix(file.Name(), "..") {
+            continue
+        }
+        if file.Mode()&os.ModeSymlink == os.ModeSymlink {
+            file, err = os.Stat(filepath.Join(root.Name(), file.Name()))
+            if err != nil {
+                // not accessible ignore
+                continue
+            }
+            if !file.IsDir() {
+                continue
+            }
+        }
+
+        var (
+            certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
+            keyFile  = filepath.Join(root.Name(), file.Name(), privateKeyFile)
+        )
+        if !isFile(certFile) || !isFile(keyFile) {
+            continue
+        }
+        if err = manager.AddCertificate(certFile, keyFile); err != nil {
+            err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
+            logger.LogIf(GlobalContext, err, logger.Minio)
+        }
+    }
     secureConn = true
-    return x509Certs, c, secureConn, nil
+    return x509Certs, manager, secureConn, nil
 }
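`getTLSConfig` now scans the certs directory for per-domain subdirectories, each holding its own `public.crt`/`private.key`, and hands every pair to the certificate manager so one server can present different certificates per SNI hostname. A standard-library sketch of the same directory walk, assuming a local `certs/` layout; the real code goes through MinIO's `pkg/certs.Manager`, and the symlink handling from the diff is omitted here for brevity:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// loadCertDir loads certs/<domain>/{public.crt,private.key} pairs.
func loadCertDir(dir string) ([]tls.Certificate, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var certs []tls.Certificate
	for _, e := range entries {
		// Skip plain files, the CAs directory, and ".."-prefixed
		// names, as the diff does.
		if !e.IsDir() || e.Name() == "CAs" || strings.HasPrefix(e.Name(), "..") {
			continue
		}
		certFile := filepath.Join(dir, e.Name(), "public.crt")
		keyFile := filepath.Join(dir, e.Name(), "private.key")
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			fmt.Printf("skipping %s: %v\n", e.Name(), err)
			continue
		}
		certs = append(certs, cert)
	}
	return certs, nil
}

func main() {
	certs, err := loadCertDir("certs")
	if err != nil {
		fmt.Println(err)
		return
	}
	// crypto/tls selects among Certificates by SNI automatically.
	cfg := &tls.Config{Certificates: certs}
	fmt.Println("loaded", len(cfg.Certificates), "certificates")
}
```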
@@ -20,36 +20,42 @@ import (
     "bytes"
     "context"
     "errors"
+    "io/ioutil"
+    "net/http"
 
     "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/hash"
 )
 
 var errConfigNotFound = errors.New("config file not found")
 
 func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
-    var buffer bytes.Buffer
-    // Read entire content by setting size to -1
-    if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
+    r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
+    if err != nil {
         // Treat object not found as config not found.
         if isErrObjectNotFound(err) {
             return nil, errConfigNotFound
         }
 
         logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
         logger.LogIf(ctx, err)
         return nil, err
     }
+    defer r.Close()
 
-    // Return config not found on empty content.
-    if buffer.Len() == 0 {
+    buf, err := ioutil.ReadAll(r)
+    if err != nil {
+        return nil, err
+    }
+    if len(buf) == 0 {
         return nil, errConfigNotFound
     }
 
-    return buffer.Bytes(), nil
+    return buf, nil
 }
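`readConfig` switches from copying the object into a `bytes.Buffer` to draining a read-locked reader obtained from `GetObjectNInfo`, and maps zero-length content to "not found". The shape of that read path, reduced to the standard library; `errNotFound` and the reader source below are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errNotFound = errors.New("config file not found")

// readAllNonEmpty drains r and maps empty content to errNotFound,
// like the rewritten readConfig in the diff.
func readAllNonEmpty(r io.ReadCloser) ([]byte, error) {
	defer r.Close()
	buf, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	if len(buf) == 0 {
		return nil, errNotFound
	}
	return buf, nil
}

func main() {
	data, err := readAllNonEmpty(io.NopCloser(strings.NewReader(`{"region":"us-east-1"}`)))
	fmt.Println(string(data), err)

	_, err = readAllNonEmpty(io.NopCloser(strings.NewReader("")))
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```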
-func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
+type objectDeleter interface {
+    DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
+}
+
+func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) error {
     _, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
     if err != nil && isErrObjectNotFound(err) {
         return errConfigNotFound
@@ -58,12 +64,12 @@ func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) er
     }
 
 func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
-    hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
+    hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
     if err != nil {
         return err
     }
 
-    _, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
+    _, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{})
     return err
 }
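Note the design choice in `deleteConfig`: its parameter narrows from the full `ObjectLayer` to a one-method `objectDeleter` interface, so unit tests can pass a tiny fake instead of a complete object layer. A self-contained sketch of this caller-defined-interface pattern; the types below are illustrative, not MinIO's:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// deleter is the narrow, caller-defined interface: the objectDeleter
// idea from the diff, reduced to its essentials.
type deleter interface {
	DeleteObject(ctx context.Context, bucket, object string) error
}

func deleteConfig(ctx context.Context, d deleter, configFile string) error {
	return d.DeleteObject(ctx, ".minio.sys", configFile)
}

// fakeDeleter is all a unit test needs to exercise deleteConfig.
type fakeDeleter struct{ deleted []string }

func (f *fakeDeleter) DeleteObject(_ context.Context, _, object string) error {
	if object == "" {
		return errors.New("empty object name")
	}
	f.deleted = append(f.deleted, object)
	return nil
}

func main() {
	f := &fakeDeleter{}
	if err := deleteConfig(context.Background(), f, "config/iam.json"); err != nil {
		panic(err)
	}
	fmt.Println(f.deleted) // [config/iam.json]
}
```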
@@ -74,8 +80,6 @@ func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) err
             return errConfigNotFound
         }
 
-        logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
-        logger.LogIf(ctx, err)
         return err
     }
     return nil
Some files were not shown because too many files have changed in this diff.