Compare commits

Comparing RELEASE.20... with RELEASE.20... (500 commits)

(Commit table omitted: the capture preserved only the SHA1 column; the author, date, and message cells were empty.)

.github/ISSUE_TEMPLATE.md (17 changes, vendored)

````diff
@@ -1,3 +1,12 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
@@ -15,6 +24,8 @@
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->

1.
2.
3.
@@ -30,8 +41,6 @@

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Version used (`minio --version`):
* Server setup and configuration:
* Operating System and version (`uname -a`):
* Link to your project:
````

.github/ISSUE_TEMPLATE/bug_report.md (8 changes, vendored)

````diff
@@ -24,6 +24,8 @@ assignees: ''
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->

1.
2.
3.
@@ -39,8 +41,6 @@ assignees: ''

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Version used (`minio --version`):
* Server setup and configuration:
* Operating System and version (`uname -a`):
* Link to your project:
````

.github/ISSUE_TEMPLATE/config.yml (8 changes, vendored, new file)

````diff
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: MinIO Community Support
    url: https://slack.min.io
    about: Join here for Community Support
  - name: MinIO SUBNET Support
    url: https://min.io/pricing
    about: Join here for Enterprise Support
````

.github/PULL_REQUEST_TEMPLATE.md (1 change, vendored)

````diff
@@ -16,4 +16,3 @@
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Functional tests needed (If yes, add [mint](https://github.com/minio/mint) PR # here: )
````

.github/lock.yml (39 changes, vendored, new file)

````diff
@@ -0,0 +1,39 @@
# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app

# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 365

# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false

# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []

# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: true

# Comment to post before locking. Set to `false` to disable
lockComment: >-
  This thread has been automatically locked since there has not been
  any recent activity after it was closed. Please open a new issue for
  related bugs.

# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true

# Limit to only `issues` or `pulls`
only: issues

# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
#   exemptLabels:
#     - help-wanted
#   lockLabel: outdated

# pulls:
#   daysUntilLock: 30

# Repository to extend settings from
# _extends: repo
````

.github/stale.yml (59 changes, vendored, new file)

````diff
@@ -0,0 +1,59 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 15

# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - "security"
  - "pending discussion"

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: >-
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed after 15 days if no further activity
  occurs. Thank you for your contributions.

# Comment to post when removing the stale label.
# unmarkComment: >
#   Your comment here.

# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
#   Your comment here.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 1

# Limit to only `issues` or `pulls`
# only: issues

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed
````

.github/workflows/codeql.yml (51 changes, vendored, new file)

````diff
@@ -0,0 +1,51 @@
name: "Code scanning - action"

on:
  push:
  pull_request:
  schedule:
    - cron: '0 19 * * 0'

jobs:
  CodeQL-Build:

    # CodeQL runs on ubuntu-latest and windows-latest
    runs-on: ubuntu-latest

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2
      with:
        # We must fetch at least the immediate parents so that if this is
        # a pull request then we can checkout the head.
        fetch-depth: 2

    # If this run was triggered by a pull request event, then checkout
    # the head of the pull request instead of the merge commit.
    - run: git checkout HEAD^2
      if: ${{ github.event_name == 'pull_request' }}

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: go, javascript

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
````

.github/workflows/go.yml (50 changes, vendored, new file)

````diff
@@ -0,0 +1,50 @@
name: Go

on:
  pull_request:
    branches:
    - master
    - release

jobs:
  build:
    name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.13.x]
        os: [ubuntu-latest, windows-latest]
    steps:
    - uses: actions/checkout@v2
    - uses: actions/setup-node@v1
      with:
        node-version: '12'
    - uses: actions/setup-go@v2
      with:
        go-version: ${{ matrix.go-version }}
    - name: Build on ${{ matrix.os }}
      if: matrix.os == 'windows-latest'
      env:
        CGO_ENABLED: 0
        GO111MODULE: on
        MINIO_CI_CD: 1
      run: |
        go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
        go test -v --timeout 50m ./...
    - name: Build on ${{ matrix.os }}
      if: matrix.os == 'ubuntu-latest'
      env:
        CGO_ENABLED: 0
        GO111MODULE: on
        MINIO_CI_CD: 1
      run: |
        sudo apt-get install devscripts shellcheck
        make
        diff -au <(gofmt -s -d cmd) <(printf "")
        diff -au <(gofmt -s -d pkg) <(printf "")
        make test-race
        make crosscompile
        make verify
        make verify-healing
        cd browser && npm install && npm run test && cd ..
        bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
````

.golangci.yml (27 changes, new file)

````diff
@@ -0,0 +1,27 @@
linters-settings:
  golint:
    min-confidence: 0

  misspell:
    locale: US

linters:
  disable-all: true
  enable:
    - typecheck
    - goimports
    - misspell
    - govet
    - golint
    - ineffassign
    - gosimple
    - deadcode
    - structcheck

issues:
  exclude-use-default: false
  exclude:
    - should have a package comment
    - error strings should not be capitalized or end with punctuation or a newline

service:
  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
````
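
The Makefile hunk later in this diff pins golangci-lint v1.27.0 and runs it against this config; the equivalent standalone commands, assembled from those Makefile recipes rather than added behavior, are:

```sh
# Install the pinned golangci-lint release, then lint with the repo's config
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(go env GOPATH)/bin" v1.27.0
golangci-lint run --timeout=5m --config ./.golangci.yml
```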

.goreleaser.yml (167 changes, new file)

````diff
@@ -0,0 +1,167 @@
project_name: minio

release:
  name_template: "Version {{.MinIO.Version}}"
  disable: true
  github:
    owner: minio
    name: minio

env:
  - CGO_ENABLED=0
  - GO111MODULE=on

before:
  hooks:
    - make clean
    - go generate ./...
    - go mod tidy
    - go mod download

builds:
  -
    goos:
      - linux
      - darwin
      - windows
      - freebsd

    goarch:
      - amd64
      - arm64
      - arm
      - ppc64le
      - s390x

    goarm:
      - 7

    ignore:
      - goos: darwin
        goarch: arm64
      - goos: darwin
        goarch: arm
      - goos: darwin
        goarch: ppc64le
      - goos: darwin
        goarch: s390x
      - goos: windows
        goarch: arm64
      - goos: windows
        goarch: arm
      - goos: windows
        goarch: ppc64le
      - goos: windows
        goarch: s390x
      - goos: freebsd
        goarch: arm
      - goos: freebsd
        goarch: arm64
      - goos: freebsd
        goarch: ppc64le
      - goos: freebsd
        goarch: s390x

    flags:
      - -tags=kqueue
      - -trimpath

    ldflags:
      - "-s -w -X github.com/minio/minio/cmd.Version={{.Version}} -X github.com/minio/minio/cmd.ReleaseTag={{.Tag}} -X github.com/minio/minio/cmd.CommitID={{.FullCommit}} -X github.com/minio/minio/cmd.ShortCommitID={{.ShortCommit}}"

archives:
  -
    format: binary
    name_template: "{{ .Binary }}-release/{{ .Os }}-{{ .Arch }}/{{ .Binary }}.{{ .Version }}"

nfpms:
  -
    id: minio
    package_name: minio
    vendor: MinIO, Inc.
    homepage: https://min.io/
    maintainer: dev@min.io
    description: MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
    license: Apache 2.0
    bindir: /usr/bin
    formats:
      - deb
      - rpm
    overrides:
      deb:
        file_name_template: "{{ .Binary }}-release/debs/{{ .ProjectName }}-{{ .Version }}_{{ .Arch }}"
        replacements:
          arm: armv7
        files:
          "NOTICE": "/usr/share/minio/NOTICE"
          "CREDITS": "/usr/share/minio/CREDITS"
          "LICENSE": "/usr/share/minio/LICENSE"
          "README.md": "/usr/share/minio/README.md"
      rpm:
        file_name_template: "{{ .Binary }}-release/rpms/{{ .ProjectName }}-{{ .Version }}.{{ .Arch }}"
        replacements:
          amd64: x86_64
          arm64: aarch64
          arm: armv7
        files:
          "NOTICE": "/usr/share/minio/NOTICE"
          "CREDITS": "/usr/share/minio/CREDITS"
          "LICENSE": "/usr/share/minio/LICENSE"
          "README.md": "/usr/share/minio/README.md"

checksum:
  algorithm: sha256

signs:
  -
    signature: "${artifact}.asc"
    cmd: "sh"
    args:
      - '-c'
      - 'gpg --quiet --detach-sign -a ${artifact}'
    artifacts: all

changelog:
  sort: asc
  filters:
    exclude:
      - '^Update yaml files'

dockers:
  -
    goos: linux
    goarch: amd64
    dockerfile: Dockerfile.release
    image_templates:
      - minio/minio:{{ .Tag }}
      - minio/minio:latest

  -
    goos: linux
    goarch: ppc64le
    dockerfile: Dockerfile.ppc64le.release
    image_templates:
      - minio/minio:{{ .Tag }}-ppc64le

  -
    goos: linux
    goarch: s390x
    dockerfile: Dockerfile.s390x.release
    image_templates:
      - minio/minio:{{ .Tag }}-s390x

  -
    goos: linux
    goarch: arm64
    goarm: ''
    dockerfile: Dockerfile.arm64.release
    image_templates:
      - minio/minio:{{ .Tag }}-arm64

  -
    goos: linux
    goarch: arm
    goarm: '7'
    dockerfile: Dockerfile.arm.release
    image_templates:
      - minio/minio:{{ .Tag }}-arm
````
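
As a hedged aside: with this file in place, a local dry run of the release pipeline would look roughly like the sketch below. The flags are assumptions about the goreleaser CLI of that era (skipping the `signs` and `dockers`/publish stages so no GPG key or registry access is needed); they are not part of this diff.

```sh
# Sketch: build all targets into dist/ without tagging, signing, or publishing
goreleaser release --snapshot --skip-publish --skip-sign --rm-dist
```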

.travis.yml (58 changes, deleted)

````diff
@@ -1,58 +0,0 @@
go_import_path: github.com/minio/minio

language: go

addons:
  apt:
    packages:
      - shellcheck

services:
  - docker

# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
  only:
    - master

matrix:
  include:
    - os: linux
      dist: trusty
      sudo: required
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
        - GO111MODULE=on
        - SIMPLE_CI=1
      go: 1.13.x
      script:
        - make
        - diff -au <(gofmt -s -d cmd) <(printf "")
        - diff -au <(gofmt -s -d pkg) <(printf "")
        - make test-race
        - make crosscompile
        - make verify
        - cd browser && npm install && npm run test && cd ..
        - bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

    - os: windows
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
        - GO111MODULE=on
        - SIMPLE_CI=1
      go: 1.13.x
      script:
        - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
        - for d in $(go list ./... | grep -v browser); do go test -v --timeout 20m "$d"; done

before_script:
  # Add an IPv6 config - see the corresponding Travis issue
  # https://github.com/travis-ci/travis-ci/issues/8361
  - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
````

CONTRIBUTING.md

````diff
@@ -1,4 +1,4 @@
# MinIO Contribution Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
# MinIO Contribution Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)

``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.
````

Dockerfile (12 changes)

````diff
@@ -1,4 +1,4 @@
FROM golang:1.13-alpine
FROM golang:1.13-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

@@ -9,9 +9,9 @@ ENV GO111MODULE on
RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.10
FROM alpine:3.12

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
@@ -21,9 +21,9 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \

EXPOSE 9000

COPY --from=0 /go/bin/minio /usr/bin/minio
COPY CREDITS /third_party/
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /third_party/
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
````
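
As an aside, the resulting multi-stage image can be exercised locally. The `docker run` flags below are taken from the README hunks later in this diff; the `minio/minio:local` tag is only illustrative:

```sh
# Build the image from the repo root, then run it with explicit credentials
docker build -t minio/minio:local .
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:local server /data
```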

Dockerfile.arm.release

````diff
@@ -1,23 +1,9 @@
FROM golang:1.13-alpine as builder

WORKDIR /home

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git 'curl>7.61.0' && \
    git clone https://github.com/minio/minio && \
    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .

FROM arm32v7/alpine:3.10
FROM multiarch/qemu-user-static:x86_64-arm as qemu
FROM arm32v7/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
COPY --from=qemu /usr/bin/qemu-arm-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
@@ -28,9 +14,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
    curl -s -q https://dl.min.io/server/minio/release/linux-arm/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000
````

Dockerfile.arm64.release

````diff
@@ -1,23 +1,9 @@
FROM golang:1.13-alpine as builder

WORKDIR /home

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git 'curl>7.61.0' && \
    git clone https://github.com/minio/minio && \
    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .

FROM arm64v8/alpine:3.10
FROM multiarch/qemu-user-static:x86_64-aarch64 as qemu
FROM arm64v8/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
COPY --from=qemu /usr/bin/qemu-aarch64-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
@@ -28,9 +14,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
    curl -s -q https://dl.min.io/server/minio/release/linux-arm64/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000
````

````diff
@@ -1,4 +1,4 @@
FROM alpine:3.10
FROM alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"
````

Dockerfile.dev.browser (12 changes, new file)

````diff
@@ -0,0 +1,12 @@
FROM ubuntu

LABEL maintainer="MinIO Inc <dev@min.io>"

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends --no-install-suggests \
    git golang make npm && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

ENV PATH=$PATH:/root/go/bin

RUN go get github.com/go-bindata/go-bindata/go-bindata && \
    go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
````
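
The browser README later in this diff uses this image for its container-based workflow; the relevant commands from that section are:

```sh
# Build the dev image and start a container with the sources mounted
docker build -t minio-dev -f Dockerfile.dev.browser .
docker run -it --rm --name minio-dev -v "$PWD":/minio minio-dev
```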

````diff
@@ -1,17 +1,11 @@
FROM ubuntu:16.04
FROM ubuntu:18.04

ENV DEBIAN_FRONTEND noninteractive

ENV LANG C.UTF-8

ENV GOROOT /usr/local/go

ENV GOPATH /usr/local

ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

ENV MINT_ROOT_DIR /mint

COPY mint /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
````

Dockerfile.ppc64le.release (29 changes, new file)

````diff
@@ -0,0 +1,29 @@
FROM multiarch/qemu-user-static:x86_64-ppc64le as qemu
FROM ppc64le/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-ppc64le-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-ppc64le/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
````

Dockerfile.release

````diff
@@ -1,20 +1,7 @@
FROM golang:1.13-alpine

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio

FROM alpine:3.10
FROM alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
@@ -24,9 +11,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
    curl -s -q https://dl.min.io/server/minio/release/linux-amd64/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000
````

Dockerfile.s390x.release (29 changes, new file)

````diff
@@ -0,0 +1,29 @@
FROM multiarch/qemu-user-static:x86_64-s390x as qemu
FROM s390x/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-s390x-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-s390x/minio -o /usr/bin/minio && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
````

````diff
@@ -1,80 +0,0 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.13

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
ENV GO111MODULE=on
ENV SIMPLE_CI 1

RUN git config --global http.cookiefile /gitcookie/.gitcookie

RUN apt-get update && \
    apt-get -y install sudo
RUN touch /etc/sudoers

RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
RUN echo "ci ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN echo "Defaults env_reset" >> /etc/sudoers
RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go:/usr/local/go/bin"' >> /etc/sudoers

RUN mkdir -p /home/ci/.cache

RUN groupadd -g 999 ci && \
    useradd -r -u 999 -g ci ci && \
    chown -R ci:ci /go /home/ci && \
    chmod -R a+rw /go

USER ci

# -- tests --
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN make test-race
RUN make crosscompile
RUN make verify

## -- add healing tests
RUN make verify-healing

#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim

ENV SIMPLE_CI 1

COPY browser /minio/browser
WORKDIR /minio/browser

RUN yarn
RUN yarn test

#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
COPY mint /mint

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV SIMPLE_CI 1
ENV MINT_ROOT_DIR /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
    apt-get --yes --quiet install wget jq curl git dnsmasq && \
    cd /mint && /mint/release.sh

WORKDIR /mint

RUN /usr/bin/gateway-tests.sh
````

Makefile (44 changes)

````diff
@@ -8,8 +8,6 @@ GOOS := $(shell go env GOOS)
VERSION ?= $(shell git describe --tags)
TAG ?= "minio/minio:$(VERSION)"

BUILD_LDFLAGS := '$(LDFLAGS)'

all: build

checks:
@@ -18,22 +16,12 @@ checks:

getdeps:
	@mkdir -p ${GOPATH}/bin
	@which golint 1>/dev/null || (echo "Installing golint" && GO111MODULE=off go get -u golang.org/x/lint/golint)
ifeq ($(GOARCH),s390x)
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
else
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
endif
	@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)
	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps vet fmt lint staticcheck spelling

vet:
	@echo "Running $@ check"
	@GO111MODULE=on go vet github.com/minio/minio/...
verifiers: getdeps fmt lint

fmt:
	@echo "Running $@ check"
@@ -42,21 +30,8 @@ fmt:

lint:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...

staticcheck:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

spelling:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

# Builds minio, runs the verifiers then runs the tests.
check: test
@@ -71,21 +46,23 @@ test-race: verifiers build
# Verify minio binary
verify:
	@echo "Verifying build with race"
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-build.sh)

# Verify healing of disks with minio binary
verify-healing:
	@echo "Verify healing build with race"
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-healing.sh)

# Builds minio locally.
build: checks
	@echo "Building minio binary to './minio'"
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

docker: build
docker: checks
	@echo "Building minio docker image '$(TAG)'"
	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@docker build -t $(TAG) . -f Dockerfile.dev

# Builds minio and installs it to $GOPATH/bin.
@@ -101,3 +78,4 @@ clean:
	@rm -rvf minio
	@rm -rvf build
	@rm -rvf release
	@rm -rvf .verify*
````
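
For reference, the reworked targets compose as follows; these invocations come directly from the CI scripts elsewhere in this diff (`.github/workflows/go.yml`), not from added behavior:

```sh
# Local equivalent of the Ubuntu CI job after this Makefile change
make                 # build (runs `checks` first)
make test-race       # runs `verifiers` plus the race-enabled tests
make verify          # race build followed by buildscripts/verify-build.sh
make verify-healing  # race build followed by buildscripts/verify-healing.sh
```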

README.md (75 changes)

````diff
@@ -1,28 +1,32 @@
# MinIO Quickstart Guide
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)

[](https://min.io)

MinIO is High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Using MinIO build high performance infrastructure for machine learning, analytics and application data workloads.
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.

## Docker Container
### Stable
```
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio server /data
```

### Edge
```
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:edge server /data
```

> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)

## macOS
### Homebrew (recommended)
Install minio packages using [Homebrew](http://brew.sh/)
Install minio packages using [Homebrew](https://brew.sh/)
```sh
brew install minio/stable/minio
minio server /data
@@ -94,23 +98,6 @@ GO111MODULE=on go get github.com/minio/minio

By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.

### iptables

For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
access to port 9000

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```

Below command enables all incoming traffic to ports ranging from 9000 to 9010.

```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```

### ufw

For hosts with ufw enabled (Debian based distros), you can use `ufw` command to allow traffic to specific ports. Use below command to allow access to port 9000
@@ -145,6 +132,23 @@ Note that `permanent` makes sure the rules are persistent across firewall start,
firewall-cmd --reload
```

### iptables

For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
access to port 9000

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```

Below command enables all incoming traffic to ports ranging from 9000 to 9010.

```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```

## Test using MinIO Browser
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully.

@@ -159,20 +163,23 @@ When deployed on a single drive, MinIO server lets clients access any pre-existi
The above statement is also valid for all gateway backends.

## Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster and restart them, as shown in the following command from the MinIO client (mc):
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):

```
mc admin update <minio alias, e.g., myminio>
```

**Important things to remember during upgrades**:
> NOTE: some releases might not allow rolling upgrades, this is always called out in the release notes and it is generally advised to read release notes before upgrading. In such a situation `mc admin update` is the recommended upgrading mechanism to upgrade all servers at once.

### Important things to remember during MinIO upgrades

- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located, for example if the current binary is at `/usr/local/bin/minio`, you would need write access to `/usr/local/bin`.
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` until all clusters have been updated.
- If you are updating the server it is always recommended (unless explicitly mentioned in MinIO server release notes), to update `mc` once all the servers have been upgraded using `mc update`.
- `mc admin update` is disabled in docker/container environments, container environments provide their own mechanisms for updating running containers.
- If you are using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If you are using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
- `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
- `mc admin update` is disabled in kubernetes/container environments, container environments provide their own mechanisms to rollout of updates.
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)x
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md

## Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
@@ -185,11 +192,5 @@ mc admin update <minio alias, e.g., myminio>
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)

## Caveats
MinIO in its default mode doesn't use MD5Sum checkums of incoming streams unless requested by the client in `Content-Md5` header for validation. This may lead to incompatibility with rare S3 clients like `s3ql` which unfortunately do not set `Content-Md5` but depend on hex MD5Sum for the stream to be calculated by the server. MinIO considers this as a bug in `s3ql` and should be fixed on the client side because MD5Sum is a poor way to checksum and validate the authenticity of the objects. Although MinIO provides a workaround until client applications are fixed use `--compat` option instead to start the server.
```sh
./minio --compat server /data
```

## License
[](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)
````

````diff
@@ -1,4 +1,4 @@
# MinIO Quickstart Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
# MinIO Quickstart Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)

MinIO is an object storage service released under the Apache License v2.0. It is compatible with the Amazon S3 cloud storage API and is well suited for storing large volumes of unstructured data such as images, videos, log files, backups, and container/VM images; an object can be of any size, from a few KB up to a maximum of 5 TB.
````
@@ -2,6 +2,7 @@
|
||||
|
||||
``MinIO Browser`` provides minimal set of UI to manage buckets and objects on ``minio`` server. ``MinIO Browser`` is written in javascript and released under [Apache 2.0 License](./LICENSE).
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
### Install node
|
||||
@@ -11,6 +12,11 @@ exec -l $SHELL
|
||||
nvm install stable
|
||||
```
|
||||
|
||||
### Install node dependencies
|
||||
```sh
|
||||
npm install
|
||||
```
|
||||
|
||||
### Install `go-bindata` and `go-bindata-assetfs`
|
||||
|
||||
If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
|
||||
@@ -30,13 +36,16 @@ npm run release
|
||||
|
||||
This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
|
||||
|
||||
|
||||
## Run MinIO Browser with live reload
|
||||
|
||||
### Run MinIO Browser with live reload
|
||||
|
||||
```sh
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application
|
||||
Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application.
|
||||
|
||||
### Run MinIO Browser with live reload on custom port
|
||||
|
||||
@@ -54,7 +63,7 @@ index 3ccdaba..9496c56 100644
|
||||
+ port: 8888,
|
||||
proxy: {
|
||||
'/minio/webrpc': {
|
||||
target: 'http://localhost:9000',
|
||||
target: 'http://localhost:9000',
|
||||
@@ -97,7 +98,7 @@ var exports = {
|
||||
if (process.env.NODE_ENV === 'dev') {
|
||||
exports.entry = [
|
||||
@@ -70,4 +79,102 @@ index 3ccdaba..9496c56 100644
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application
|
||||
Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application.

### Run MinIO Browser with live reload on any IP

Edit `browser/webpack.config.js`

```diff
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
index 8bdbba53..139f6049 100644
--- a/browser/webpack.config.js
+++ b/browser/webpack.config.js
@@ -71,6 +71,7 @@ var exports = {
     historyApiFallback: {
       index: '/minio/'
     },
+    host: '0.0.0.0',
     proxy: {
       '/minio/webrpc': {
         target: 'http://localhost:9000',
```

```sh
npm run dev
```

Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.

## Run tests

```sh
npm run test
```

## Docker development environment

This approach downloads the sources to your machine so that you can use your IDE or editor of choice.
A Docker container is used to provide a controlled build environment without messing with your host system.

### Prepare host system

Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and [Docker](https://docs.docker.com/get-docker/).

### Development within container

Prepare and build the container:

```sh
git clone git@github.com:minio/minio.git
cd minio
docker build -t minio-dev -f Dockerfile.dev.browser .
```

Run the container, then build and run the core:

```sh
docker run -it --rm --name minio-dev -v "$PWD":/minio minio-dev

cd /minio/browser
npm install
npm run release
cd /minio
make
./minio server /data
```
Note the `Endpoint` IP (the one which is _not_ `127.0.0.1`), the `AccessKey`, and the `SecretKey` (both default to `minioadmin`) in order to enter them in the browser later.

Open another terminal and connect to the container:

```sh
docker exec -it minio-dev bash
```

Apply a patch to allow access from outside the container:

```sh
cd /minio
git apply --ignore-whitespace <<EOF
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
index 8bdbba53..139f6049 100644
--- a/browser/webpack.config.js
+++ b/browser/webpack.config.js
@@ -71,6 +71,7 @@ var exports = {
     historyApiFallback: {
       index: '/minio/'
     },
+    host: '0.0.0.0',
     proxy: {
       '/minio/webrpc': {
         target: 'http://localhost:9000',
EOF
```

Build and run the frontend with auto-reload:

```sh
cd /minio/browser
npm install
npm run dev
```

Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.

@@ -88,11 +88,6 @@ export class ChangePasswordModal extends React.Component {

  canChangePassword() {
    const { serverInfo } = this.props
    // Password change is not allowed in WORM mode
    if (serverInfo.info.isWorm) {
      return false
    }

    // Password change is not allowed for temporary users(STS)
    if(serverInfo.userInfo.isTempUser) {
      return false
@@ -26,13 +26,10 @@ export class StorageInfo extends React.Component {
  }
  render() {
    const { used } = this.props.storageInfo

    if (!used) {
    if (!used || used == 0) {
      return <noscript />
    }

    const totalUsed = used.reduce((v1, v2) => v1 + v2, 0)

    return (
      <div className="feh-used">
        <div className="fehu-chart">
@@ -41,7 +38,7 @@ export class StorageInfo extends React.Component {
          <ul>
            <li>
              <span>Used: </span>
              {humanize.filesize(totalUsed)}
              {humanize.filesize(used)}
            </li>
          </ul>
        </div>

@@ -64,17 +64,6 @@ describe("ChangePasswordModal", () => {
    shallow(<ChangePasswordModal serverInfo={serverInfo} />)
  })

  it("should not allow changing password when isWorm is true", () => {
    const newServerInfo = { ...serverInfo, info: { isWorm: true } }
    const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
    expect(
      wrapper
        .find("ModalBody")
        .childAt(0)
        .text()
    ).toBe("Credentials of this user cannot be updated through MinIO Browser.")
  })

  it("should not allow changing password when not IAM user", () => {
    const newServerInfo = {
      ...serverInfo,

@@ -21,7 +21,7 @@ import { StorageInfo } from "../StorageInfo"
describe("StorageInfo", () => {
  it("should render without crashing", () => {
    shallow(
      <StorageInfo storageInfo={{ used: [60] }} fetchStorageInfo={jest.fn()} />
      <StorageInfo storageInfo={ {used: 60} } fetchStorageInfo={jest.fn()} />
    )
  })

@@ -29,7 +29,7 @@ describe("StorageInfo", () => {
    const fetchStorageInfo = jest.fn()
    shallow(
      <StorageInfo
        storageInfo={{ used: [60] }}
        storageInfo={ {used: 60} }
        fetchStorageInfo={fetchStorageInfo}
      />
    )
@@ -40,7 +40,7 @@ describe("StorageInfo", () => {
    const fetchStorageInfo = jest.fn()
    const wrapper = shallow(
      <StorageInfo
        storageInfo={{ used: null }}
        storageInfo={ {used: 0} }
        fetchStorageInfo={fetchStorageInfo}
      />
    )

@@ -20,7 +20,9 @@ import * as actionsCommon from "../actions"

jest.mock("../../web", () => ({
  StorageInfo: jest.fn(() => {
    return Promise.resolve({ storageInfo: { Used: [60] } })
    return Promise.resolve({
      used: 60
    })
  }),
  ServerInfo: jest.fn(() => {
    return Promise.resolve({
@@ -39,7 +41,7 @@ describe("Common actions", () => {
  it("creates common/SET_STORAGE_INFO after fetching the storage details ", () => {
    const store = mockStore()
    const expectedActions = [
      { type: "common/SET_STORAGE_INFO", storageInfo: { used: [60] } }
      { type: "common/SET_STORAGE_INFO", storageInfo: { used: 60 } }
    ]
    return store.dispatch(actionsCommon.fetchStorageInfo()).then(() => {
      const actions = store.getActions()

@@ -21,11 +21,7 @@ describe("common reducer", () => {
  it("should return the initial state", () => {
    expect(reducer(undefined, {})).toEqual({
      sidebarOpen: false,
      storageInfo: {
        total: [0],
        free: [0],
        used: [0]
      },
      storageInfo: {used: 0},
      serverInfo: {}
    })
  })
@@ -62,11 +58,11 @@ describe("common reducer", () => {
        {},
        {
          type: actionsCommon.SET_STORAGE_INFO,
          storageInfo: { total: [100], free: [40] }
          storageInfo: { }
        }
      )
    ).toEqual({
      storageInfo: { total: [100], free: [40] }
      storageInfo: { }
    })
  })

@@ -33,8 +33,7 @@ export const fetchStorageInfo = () => {
  return function(dispatch) {
    return web.StorageInfo().then(res => {
      const storageInfo = {
        total: res.storageInfo.Total,
        used: res.storageInfo.Used
        used: res.used
      }
      dispatch(setStorageInfo(storageInfo))
    })

@@ -19,7 +19,7 @@ import * as actionsCommon from "./actions"
export default (
  state = {
    sidebarOpen: false,
    storageInfo: { total: [0], free: [0], used: [0] },
    storageInfo: {used: 0},
    serverInfo: {}
  },
  action

browser/app/js/browser/selectors.js (new file, 24 lines)
@@ -0,0 +1,24 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { createSelector } from "reselect"

export const getServerInfo = state => state.browser.serverInfo

export const hasServerPublicDomain = createSelector(
  getServerInfo,
  serverInfo => Boolean(serverInfo.info && serverInfo.info.domains && serverInfo.info.domains.length),
)
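
For context, `createSelector` memoizes the derived value, so the Boolean is only recomputed when `state.browser.serverInfo` actually changes. The snippet below is a hypothetical usage illustration, not part of this changeset; the component and prop names are invented for the example.

```js
// Hypothetical usage sketch (illustrative only, not part of this
// changeset): a connected component consuming the memoized selector.
import React from "react"
import { connect } from "react-redux"
import { hasServerPublicDomain } from "./browser/selectors"

// Renders a hint only when the server advertises a public domain.
const PublicDomainHint = ({ showPublicLink }) =>
  showPublicLink ? <span>Public links available</span> : null

const mapStateToProps = (state) => ({
  showPublicLink: hasServerPublicDomain(state),
})

export default connect(mapStateToProps)(PublicDomainHint)
```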

@@ -31,19 +31,38 @@ export const SET_POLICIES = "buckets/SET_POLICIES"

export const fetchBuckets = () => {
  return function(dispatch) {
    const { bucket, prefix } = pathSlice(history.location.pathname)
    return web.ListBuckets().then(res => {
      const buckets = res.buckets ? res.buckets.map(bucket => bucket.name) : []
      dispatch(setList(buckets))
      if (buckets.length > 0) {
        const { bucket, prefix } = pathSlice(history.location.pathname)
        dispatch(setList(buckets))
        if (bucket && buckets.indexOf(bucket) > -1) {
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(selectBucket(buckets[0]))
        }
      } else {
        dispatch(selectBucket(""))
        history.replace("/")
        if (bucket) {
          dispatch(setList([bucket]))
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(selectBucket(""))
          history.replace("/")
        }
      }
    })
      .catch(err => {
        if (bucket && err.message === "Access Denied." || err.message.indexOf('Prefix access is denied') > -1 ) {
          dispatch(setList([bucket]))
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(
            alertActions.set({
              type: "danger",
              message: err.message,
              autoClear: true,
            })
          )
        }
      })
}

@@ -14,30 +14,67 @@
 * limitations under the License.
 */

import mimedb from 'mime-types'
import mimedb from "mime-types"

const isFolder = (name, contentType) => {
  if (name.endsWith('/')) return true
  if (name.endsWith("/")) return true
  return false
}

const isPdf = (name, contentType) => {
  if (contentType === 'application/pdf') return true
  if (contentType === "application/pdf") return true
  return false
}
const isImage = (name, contentType) => {
  if (
    contentType === "image/jpeg" ||
    contentType === "image/gif" ||
    contentType === "image/x-icon" ||
    contentType === "image/png" ||
    contentType === "image/svg+xml" ||
    contentType === "image/tiff" ||
    contentType === "image/webp"
  )
    return true
  return false
}

const isZip = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  if (contentType.split('/')[1].includes('zip')) return true
  if (!contentType || !contentType.includes("/")) return false
  if (contentType.split("/")[1].includes("zip")) return true
  return false
}

const isCode = (name, contentType) => {
  const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs',
    'php', 'css', 'less', 'scss', 'coffee', 'net', 'html',
    'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp',
    'asp', 'aspx']
  const ext = name.split('.').reverse()[0]
  const codeExt = [
    "c",
    "cpp",
    "go",
    "py",
    "java",
    "rb",
    "js",
    "pl",
    "fs",
    "php",
    "css",
    "less",
    "scss",
    "coffee",
    "net",
    "html",
    "rs",
    "exs",
    "scala",
    "hs",
    "clj",
    "el",
    "scm",
    "lisp",
    "asp",
    "aspx",
  ]
  const ext = name.split(".").reverse()[0]
  for (var i in codeExt) {
    if (ext === codeExt[i]) return true
  }
@@ -45,9 +82,9 @@ const isCode = (name, contentType) => {
}

const isExcel = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  const types = ['excel', 'spreadsheet']
  const subType = contentType.split('/')[1]
  if (!contentType || !contentType.includes("/")) return false
  const types = ["excel", "spreadsheet"]
  const subType = contentType.split("/")[1]
  for (var i in types) {
    if (subType.includes(types[i])) return true
  }
@@ -55,9 +92,9 @@ const isExcel = (name, contentType) => {
}

const isDoc = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  const types = ['word', '.document']
  const subType = contentType.split('/')[1]
  if (!contentType || !contentType.includes("/")) return false
  const types = ["word", ".document"]
  const subType = contentType.split("/")[1]
  for (var i in types) {
    if (subType.includes(types[i])) return true
  }
@@ -65,9 +102,9 @@ const isDoc = (name, contentType) => {
}

const isPresentation = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  var types = ['powerpoint', 'presentation']
  const subType = contentType.split('/')[1]
  if (!contentType || !contentType.includes("/")) return false
  var types = ["powerpoint", "presentation"]
  const subType = contentType.split("/")[1]
  for (var i in types) {
    if (subType.includes(types[i])) return true
  }
@@ -76,31 +113,32 @@ const isPresentation = (name, contentType) => {

const typeToIcon = (type) => {
  return (name, contentType) => {
    if (!contentType || !contentType.includes('/')) return false
    if (contentType.split('/')[0] === type) return true
    if (!contentType || !contentType.includes("/")) return false
    if (contentType.split("/")[0] === type) return true
    return false
  }
}

export const getDataType = (name, contentType) => {
  if (contentType === "") {
    contentType = mimedb.lookup(name) || 'application/octet-stream'
    contentType = mimedb.lookup(name) || "application/octet-stream"
  }
  const check = [
    ['folder', isFolder],
    ['code', isCode],
    ['audio', typeToIcon('audio')],
    ['image', typeToIcon('image')],
    ['video', typeToIcon('video')],
    ['text', typeToIcon('text')],
    ['pdf', isPdf],
    ['zip', isZip],
    ['excel', isExcel],
    ['doc', isDoc],
    ['presentation', isPresentation]
    ["folder", isFolder],
    ["code", isCode],
    ["audio", typeToIcon("audio")],
    ["image", typeToIcon("image")],
    ["video", typeToIcon("video")],
    ["text", typeToIcon("text")],
    ["pdf", isPdf],
    ["image", isImage],
    ["zip", isZip],
    ["excel", isExcel],
    ["doc", isDoc],
    ["presentation", isPresentation],
  ]
  for (var i in check) {
    if (check[i][1](name, contentType)) return check[i][0]
  }
  return 'other'
  return "other"
}

@@ -19,18 +19,22 @@ import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import ShareObjectModal from "./ShareObjectModal"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import PreviewObjectModal from "./PreviewObjectModal"

import * as objectsActions from "./actions"
import { getDataType } from "../mime.js"
import {
  SHARE_OBJECT_EXPIRY_DAYS,
  SHARE_OBJECT_EXPIRY_HOURS,
  SHARE_OBJECT_EXPIRY_MINUTES
  SHARE_OBJECT_EXPIRY_MINUTES,
} from "../constants"

export class ObjectActions extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      showDeleteConfirmation: false
      showDeleteConfirmation: false,
      showPreview: false,
    }
  }
  shareObject(e) {
@@ -43,6 +47,11 @@ export class ObjectActions extends React.Component {
      SHARE_OBJECT_EXPIRY_MINUTES
    )
  }
  handleDownload(e) {
    e.preventDefault()
    const { object, downloadObject } = this.props
    downloadObject(object.name)
  }
  deleteObject() {
    const { object, deleteObject } = this.props
    deleteObject(object.name)
@@ -53,7 +62,20 @@ export class ObjectActions extends React.Component {
  }
  hideDeleteConfirmModal() {
    this.setState({
      showDeleteConfirmation: false
      showDeleteConfirmation: false,
    })
  }
  getObjectURL(objectname, callback) {
    const { getObjectURL } = this.props
    getObjectURL(objectname, callback)
  }
  showPreviewModal(e) {
    e.preventDefault()
    this.setState({ showPreview: true })
  }
  hidePreviewModal() {
    this.setState({
      showPreview: false,
    })
  }
  render() {
@@ -65,26 +87,54 @@ export class ObjectActions extends React.Component {
        <a
          href=""
          className="fiad-action"
          title="Share"
          onClick={this.shareObject.bind(this)}
        >
          <i className="fas fa-share-alt" />
        </a>
        {getDataType(object.name, object.contentType) == "image" && (
          <a
            href=""
            className="fiad-action"
            title="Preview"
            onClick={this.showPreviewModal.bind(this)}
          >
            <i className="far fa-file-image" />
          </a>
        )}
        <a
          href=""
          className="fiad-action"
          title="Download"
          onClick={this.handleDownload.bind(this)}
        >
          <i className="fas fa-cloud-download-alt" />
        </a>
        <a
          href=""
          className="fiad-action"
          title="Delete"
          onClick={this.showDeleteConfirmModal.bind(this)}
        >
          <i className="fas fa-trash-alt" />
        </a>
      </Dropdown.Menu>
      {(showShareObjectModal && shareObjectName === object.name) &&
        <ShareObjectModal object={object} />}
      {showShareObjectModal && shareObjectName === object.name && (
        <ShareObjectModal object={object} />
      )}
      {this.state.showDeleteConfirmation && (
        <DeleteObjectConfirmModal
          deleteObject={this.deleteObject.bind(this)}
          hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
        />
      )}
      {this.state.showPreview && (
        <PreviewObjectModal
          object={object}
          hidePreviewModal={this.hidePreviewModal.bind(this)}
          getObjectURL={this.getObjectURL.bind(this)}
        />
      )}
    </Dropdown>
  )
}
@@ -94,15 +144,18 @@ const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
    showShareObjectModal: state.objects.shareObject.show,
    shareObjectName: state.objects.shareObject.object
    shareObjectName: state.objects.shareObject.object,
  }
}

const mapDispatchToProps = dispatch => {
const mapDispatchToProps = (dispatch) => {
  return {
    downloadObject: object => dispatch(objectsActions.downloadObject(object)),
    shareObject: (object, days, hours, minutes) =>
      dispatch(objectsActions.shareObject(object, days, hours, minutes)),
    deleteObject: object => dispatch(objectsActions.deleteObject(object))
    deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
    getObjectURL: (object, callback) =>
      dispatch(objectsActions.getObjectURL(object, callback)),
  }
}

@@ -54,8 +54,11 @@ export const ObjectItem = ({
    href={getDataType(name, contentType) === "folder" ? name : "#"}
    onClick={e => {
      e.preventDefault()
      // onclick function is passed only when we have a prefix
      if (onClick) {
        onClick()
      } else {
        checked ? uncheckObject(name) : checkObject(name)
      }
    }}
  >

browser/app/js/objects/PrefixActions.js (new file, 95 lines)
@@ -0,0 +1,95 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import * as actions from "./actions"

export class PrefixActions extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      showDeleteConfirmation: false,
    }
  }
  handleDownload(e) {
    e.preventDefault()
    const { object, downloadPrefix } = this.props
    downloadPrefix(object.name)
  }
  deleteObject() {
    const { object, deleteObject } = this.props
    deleteObject(object.name)
  }
  showDeleteConfirmModal(e) {
    e.preventDefault()
    this.setState({ showDeleteConfirmation: true })
  }
  hideDeleteConfirmModal() {
    this.setState({
      showDeleteConfirmation: false,
    })
  }
  render() {
    const { object, showShareObjectModal, shareObjectName } = this.props
    return (
      <Dropdown id={`obj-actions-${object.name}`}>
        <Dropdown.Toggle noCaret className="fia-toggle" />
        <Dropdown.Menu>
          <a
            href=""
            className="fiad-action"
            title="Download as zip"
            onClick={this.handleDownload.bind(this)}
          >
            <i className="fas fa-cloud-download-alt" />
          </a>
          <a
            href=""
            className="fiad-action"
            title="Delete"
            onClick={this.showDeleteConfirmModal.bind(this)}
          >
            <i className="fas fa-trash-alt" />
          </a>
        </Dropdown.Menu>
        {this.state.showDeleteConfirmation && (
          <DeleteObjectConfirmModal
            deleteObject={this.deleteObject.bind(this)}
            hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
          />
        )}
      </Dropdown>
    )
  }
}

const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
  }
}

const mapDispatchToProps = (dispatch) => {
  return {
    downloadPrefix: object => dispatch(actions.downloadPrefix(object)),
    deleteObject: (object) => dispatch(actions.deleteObject(object)),
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(PrefixActions)

@@ -17,22 +17,32 @@
import React from "react"
import { connect } from "react-redux"
import ObjectItem from "./ObjectItem"
import PrefixActions from "./PrefixActions"
import * as actionsObjects from "./actions"
import { getCheckedList } from "./selectors"

export const PrefixContainer = ({ object, currentPrefix, selectPrefix }) => {
export const PrefixContainer = ({
  object,
  currentPrefix,
  checkedObjectsCount,
  selectPrefix
}) => {
  const props = {
    name: object.name,
    contentType: object.contentType,
    onClick: () => selectPrefix(`${currentPrefix}${object.name}`)
  }

  if (checkedObjectsCount == 0) {
    props.actionButtons = <PrefixActions object={object} />
  }
  return <ObjectItem {...props} />
}

const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
    currentPrefix: state.objects.currentPrefix
    currentPrefix: state.objects.currentPrefix,
    checkedObjectsCount: getCheckedList(state).length
  }
}

browser/app/js/objects/PreviewObjectModal.js (new file, 68 lines)
@@ -0,0 +1,68 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { Modal, ModalHeader, ModalBody } from "react-bootstrap"

class PreviewObjectModal extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      url: "",
    }
  }

  componentDidMount() {
    this.props.getObjectURL(this.props.object.name, (url) => {
      this.setState({
        url: url,
      })
    })
  }

  render() {
    const { hidePreviewModal } = this.props
    return (
      <Modal
        show={true}
        animation={false}
        onHide={hidePreviewModal}
        bsSize="large"
      >
        <ModalHeader>Preview</ModalHeader>
        <ModalBody>
          <div className="input-group">
            {this.state.url && (
              <object data={this.state.url}>
                <h3 style={{ textAlign: "center", display: "block", width: "100%" }}>
                  Do not have read permissions to preview "{this.props.object.name}"
                </h3>
              </object>
            )}
          </div>
        </ModalBody>
        <div className="modal-footer">
          {
            <button className="btn btn-link" onClick={hidePreviewModal}>
              Cancel
            </button>
          }
        </div>
      </Modal>
    )
  }
}
export default PreviewObjectModal

@@ -77,7 +77,8 @@ export class ShareObjectModal extends React.Component {
    hideShareObject()
  }
  render() {
    const { shareObjectDetails, shareObject, hideShareObject } = this.props
    const { shareObjectDetails, hideShareObject } = this.props
    const url = `${window.location.protocol}//${shareObjectDetails.url}`
    return (
      <Modal
        show={true}
@@ -93,11 +94,12 @@ export class ShareObjectModal extends React.Component {
          type="text"
          ref={node => (this.copyTextInput = node)}
          readOnly="readOnly"
          value={window.location.protocol + "//" + shareObjectDetails.url}
          value={url}
          onClick={() => this.copyTextInput.select()}
        />
      </div>
      <div
      {shareObjectDetails.showExpiryDate && (
        <div
          className="input-group"
          style={{ display: web.LoggedIn() ? "block" : "none" }}
        >
@@ -174,10 +176,11 @@ export class ShareObjectModal extends React.Component {
          </div>
        </div>
      </div>
      )}
    </ModalBody>
    <div className="modal-footer">
      <CopyToClipboard
        text={window.location.protocol + "//" + shareObjectDetails.url}
        text={url}
        onCopy={this.onUrlCopied.bind(this)}
      >
        <button className="btn btn-success">Copy Link</button>

@@ -66,6 +66,63 @@ describe("ObjectActions", () => {
    expect(deleteObject).toHaveBeenCalledWith("obj1")
  })

  it("should call downloadObject when single object is selected and download button is clicked", () => {
    const downloadObject = jest.fn()
    const wrapper = shallow(
      <ObjectActions
        object={{ name: "obj1" }}
        currentPrefix={"pre1/"}
        downloadObject={downloadObject} />
    )
    wrapper
      .find("a")
      .at(1)
      .simulate("click", { preventDefault: jest.fn() })
    expect(downloadObject).toHaveBeenCalled()
  })

  it("should show PreviewObjectModal when preview action is clicked", () => {
    const wrapper = shallow(
      <ObjectActions
        object={{ name: "obj1", contentType: "image/jpeg"}}
        currentPrefix={"pre1/"} />
    )
    wrapper
      .find("a")
      .at(1)
      .simulate("click", { preventDefault: jest.fn() })
    expect(wrapper.state("showPreview")).toBeTruthy()
    expect(wrapper.find("PreviewObjectModal").length).toBe(1)
  })

  it("should hide PreviewObjectModal when cancel button is clicked", () => {
    const wrapper = shallow(
      <ObjectActions
        object={{ name: "obj1" , contentType: "image/jpeg"}}
        currentPrefix={"pre1/"} />
    )
    wrapper
      .find("a")
      .at(1)
      .simulate("click", { preventDefault: jest.fn() })
    wrapper.find("PreviewObjectModal").prop("hidePreviewModal")()
    wrapper.update()
    expect(wrapper.state("showPreview")).toBeFalsy()
    expect(wrapper.find("PreviewObjectModal").length).toBe(0)
  })
  it("should not show PreviewObjectModal when preview action is clicked if object is not an image", () => {
    const wrapper = shallow(
      <ObjectActions
        object={{ name: "obj1"}}
        currentPrefix={"pre1/"} />
    )
    expect(wrapper
      .find("a")
      .length).toBe(3) // find only the other 2
  })

  it("should call shareObject with object and expiry", () => {
    const shareObject = jest.fn()
    const wrapper = shallow(

@@ -30,7 +30,10 @@ describe("ObjectItem", () => {

  it("shouldn't call onClick when the object isclicked", () => {
    const onClick = jest.fn()
    const wrapper = shallow(<ObjectItem name={"test"} />)
    const checkObject = jest.fn()
    const wrapper = shallow(
      <ObjectItem name={"test"} checkObject={checkObject} />
    )
    wrapper.find("a").simulate("click", { preventDefault: jest.fn() })
    expect(onClick).not.toHaveBeenCalled()
  })
@@ -57,9 +60,15 @@ describe("ObjectItem", () => {
  })

  it("should call uncheckObject when the object/prefix is unchecked", () => {
    const checkObject = jest.fn()
    const uncheckObject = jest.fn()
    const wrapper = shallow(
      <ObjectItem name={"test"} checked={true} uncheckObject={uncheckObject} />
      <ObjectItem
        name={"test"}
        checked={true}
        checkObject={checkObject}
        uncheckObject={uncheckObject}
      />
    )
    wrapper.find("input[type='checkbox']").simulate("change")
    expect(uncheckObject).toHaveBeenCalledWith("test")

browser/app/js/objects/__tests__/PrefixActions.test.js (new file, 84 lines)
@@ -0,0 +1,84 @@
/*
 * MinIO Cloud Storage (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { shallow } from "enzyme"
import { PrefixActions } from "../PrefixActions"

describe("PrefixActions", () => {
  it("should render without crashing", () => {
    shallow(<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />)
  })

  it("should show DeleteObjectConfirmModal when delete action is clicked", () => {
    const wrapper = shallow(
      <PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
    )
    wrapper
      .find("a")
      .last()
      .simulate("click", { preventDefault: jest.fn() })
    expect(wrapper.state("showDeleteConfirmation")).toBeTruthy()
    expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(1)
  })

  it("should hide DeleteObjectConfirmModal when Cancel button is clicked", () => {
    const wrapper = shallow(
      <PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
    )
    wrapper
      .find("a")
      .last()
      .simulate("click", { preventDefault: jest.fn() })
    wrapper.find("DeleteObjectConfirmModal").prop("hideDeleteConfirmModal")()
    wrapper.update()
    expect(wrapper.state("showDeleteConfirmation")).toBeFalsy()
    expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(0)
  })

  it("should call deleteObject with object name", () => {
    const deleteObject = jest.fn()
    const wrapper = shallow(
      <PrefixActions
        object={{ name: "abc/" }}
        currentPrefix={"pre1/"}
        deleteObject={deleteObject}
      />
    )
    wrapper
      .find("a")
      .last()
      .simulate("click", { preventDefault: jest.fn() })
    wrapper.find("DeleteObjectConfirmModal").prop("deleteObject")()
    expect(deleteObject).toHaveBeenCalledWith("abc/")
  })

  it("should call downloadPrefix when single object is selected and download button is clicked", () => {
    const downloadPrefix = jest.fn()
    const wrapper = shallow(
      <PrefixActions
        object={{ name: "abc/" }}
        currentPrefix={"pre1/"}
        downloadPrefix={downloadPrefix} />
    )
    wrapper
      .find("a")
      .first()
      .simulate("click", { preventDefault: jest.fn() })
    expect(downloadPrefix).toHaveBeenCalled()
  })
})

@@ -41,4 +41,22 @@ describe("PrefixContainer", () => {
    wrapper.find("Connect(ObjectItem)").prop("onClick")()
    expect(selectPrefix).toHaveBeenCalledWith("xyz/abc/")
  })

  it("should pass actions to ObjectItem", () => {
    const wrapper = shallow(
      <PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={0} />
    )
    expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).not.toBe(
      undefined
    )
  })

  it("should pass empty actions to ObjectItem when checkedObjectCount is more than 0", () => {
    const wrapper = shallow(
      <PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={1} />
    )
    expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).toBe(
      undefined
    )
  })
})

@@ -34,7 +34,7 @@ describe("ShareObjectModal", () => {
    shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
      />
    )
  })
@@ -44,7 +44,7 @@ describe("ShareObjectModal", () => {
    const wrapper = shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
        hideShareObject={hideShareObject}
      />
    )
@@ -59,7 +59,7 @@ describe("ShareObjectModal", () => {
    const wrapper = shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
      />
    )
    expect(
@@ -76,7 +76,7 @@ describe("ShareObjectModal", () => {
    const wrapper = shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
        hideShareObject={hideShareObject}
        showCopyAlert={showCopyAlert}
      />
@@ -89,8 +89,15 @@ describe("ShareObjectModal", () => {
  describe("Update expiry values", () => {
    const props = {
      object: { name: "obj1" },
      shareObjectDetails: { show: true, object: "obj1", url: "test" }
      shareObjectDetails: { show: true, object: "obj1", url: "test", showExpiryDate: true }
    }

    it("should not show expiry values if shared with public link", () => {
      const shareObjectDetails = { show: true, object: "obj1", url: "test", showExpiryDate: false }
      const wrapper = shallow(<ShareObjectModal {...props} shareObjectDetails={shareObjectDetails} />)
      expect(wrapper.find('.set-expire').exists()).toEqual(false)
    })

    it("should have default expiry values", () => {
      const wrapper = shallow(<ShareObjectModal {...props} />)
      expect(wrapper.state("expiry")).toEqual({

@@ -34,6 +34,7 @@ jest.mock("../../web", () => ({
    .mockReturnValueOnce(false)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(false),
  ListObjects: jest.fn(({ bucketName }) => {
    if (bucketName === "test-deny") {
@@ -70,6 +71,16 @@ jest.mock("../../web", () => ({
    .mockImplementationOnce(() => {
      return Promise.resolve({ token: "test" })
    })
    .mockImplementationOnce(() => {
      return Promise.resolve({ token: "test" })
    }),
  GetBucketPolicy: jest.fn(({ bucketName, prefix }) => {
    if (!bucketName) {
      return Promise.reject({ message: "Invalid bucket" })
    }
    if (bucketName === 'test-public') return Promise.resolve({ policy: 'readonly' })
    return Promise.resolve({})
  })
}))

const middlewares = [thunk]
@@ -295,7 +306,8 @@ describe("Objects actions", () => {
        type: "objects/SET_SHARE_OBJECT",
        show: true,
        object: "b.txt",
        url: "test"
        url: "test",
        showExpiryDate: true
      }
    ]
    store.dispatch(actionsObjects.showShareObject("b.txt", "test"))
@@ -321,14 +333,16 @@ describe("Objects actions", () => {
  it("creates objects/SET_SHARE_OBJECT when object is shared", () => {
    const store = mockStore({
      buckets: { currentBucket: "bk1" },
      objects: { currentPrefix: "pre1/" }
      objects: { currentPrefix: "pre1/" },
      browser: { serverInfo: {} },
    })
    const expectedActions = [
      {
        type: "objects/SET_SHARE_OBJECT",
        show: true,
        object: "a.txt",
        url: "https://test.com/bk1/pre1/b.txt"
        url: "https://test.com/bk1/pre1/b.txt",
        showExpiryDate: true
      },
      {
        type: "alert/SET",
@@ -347,10 +361,42 @@ describe("Objects actions", () => {
    })
  })

  it("creates objects/SET_SHARE_OBJECT when object is shared with public link", () => {
    const store = mockStore({
      buckets: { currentBucket: "test-public" },
      objects: { currentPrefix: "pre1/" },
      browser: { serverInfo: { info: { domains: ['public.com'] }} },
    })
    const expectedActions = [
      {
        type: "objects/SET_SHARE_OBJECT",
        show: true,
        object: "a.txt",
        url: "public.com/test-public/pre1/a.txt",
        showExpiryDate: false
      },
      {
        type: "alert/SET",
        alert: {
          type: "success",
          message: "Object shared.",
          id: alertActions.alertId
        }
      }
    ]
    return store
      .dispatch(actionsObjects.shareObject("a.txt", 1, 0, 0))
      .then(() => {
        const actions = store.getActions()
        expect(actions).toEqual(expectedActions)
      })
  })

  it("creates alert/SET when shareObject is failed", () => {
    const store = mockStore({
      buckets: { currentBucket: "" },
      objects: { currentPrefix: "pre1/" }
      objects: { currentPrefix: "pre1/" },
      browser: { serverInfo: {} },
    })
    const expectedActions = [
      {
@@ -442,6 +488,34 @@ describe("Objects actions", () => {
    })
  })

  it("should download prefix", () => {
    const open = jest.fn()
    const send = jest.fn()
    const xhrMockClass = () => ({
      open: open,
      send: send
    })
    window.XMLHttpRequest = jest.fn().mockImplementation(xhrMockClass)

    const store = mockStore({
      buckets: { currentBucket: "bk1" },
      objects: { currentPrefix: "pre1/" }
    })
    return store.dispatch(actionsObjects.downloadPrefix("pre2/")).then(() => {
      const requestUrl = `${
        location.origin
      }${minioBrowserPrefix}/zip?token=test`
      expect(open).toHaveBeenCalledWith("POST", requestUrl, true)
      expect(send).toHaveBeenCalledWith(
        JSON.stringify({
          bucketName: "bk1",
          prefix: "pre1/",
          objects: ["pre2/"]
        })
      )
    })
  })

  it("creates objects/CHECKED_LIST_ADD action", () => {
    const store = mockStore()
    const expectedActions = [

@@ -19,20 +19,20 @@ import history from "../history"
import {
  sortObjectsByName,
  sortObjectsBySize,
  sortObjectsByDate
  sortObjectsByDate,
} from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import {
  minioBrowserPrefix,
  SORT_BY_NAME,
  SORT_BY_SIZE,
  SORT_BY_LAST_MODIFIED,
  SORT_ORDER_ASC,
  SORT_ORDER_DESC
  SORT_ORDER_DESC,
} from "../constants"
import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
@@ -48,35 +48,35 @@ export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"

export const setList = objects => ({
export const setList = (objects) => ({
  type: SET_LIST,
  objects
  objects,
})

export const resetList = () => ({
  type: RESET_LIST
  type: RESET_LIST,
})

export const setListLoading = listLoading => ({
export const setListLoading = (listLoading) => ({
  type: SET_LIST_LOADING,
  listLoading
  listLoading,
})

export const fetchObjects = () => {
  return function(dispatch, getState) {
  return function (dispatch, getState) {
    dispatch(resetList())
    const {
      buckets: { currentBucket },
      objects: { currentPrefix }
      objects: { currentPrefix },
    } = getState()
    if (currentBucket) {
      dispatch(setListLoading(true))
      return web
        .ListObjects({
          bucketName: currentBucket,
          prefix: currentPrefix
          prefix: currentPrefix,
        })
        .then(res => {
        .then((res) => {
          // we need to check if the bucket name and prefix are the same as
          // when the request was made before updating the displayed objects
          if (
@@ -85,10 +85,10 @@ export const fetchObjects = () => {
          ) {
            let objects = []
            if (res.objects) {
              objects = res.objects.map(object => {
              objects = res.objects.map((object) => {
                return {
                  ...object,
                  name: object.name.replace(currentPrefix, "")
                  name: object.name.replace(currentPrefix, ""),
                }
              })
            }
@@ -104,13 +104,13 @@ export const fetchObjects = () => {
            dispatch(setListLoading(false))
          }
        })
        .catch(err => {
        .catch((err) => {
          if (web.LoggedIn()) {
            dispatch(
              alertActions.set({
                type: "danger",
                message: err.message,
                autoClear: true
                autoClear: true,
              })
            )
            dispatch(resetList())
@@ -123,8 +123,8 @@ export const fetchObjects = () => {
  }
}

export const sortObjects = sortBy => {
  return function(dispatch, getState) {
export const sortObjects = (sortBy) => {
  return function (dispatch, getState) {
    const { objects } = getState()
    let sortOrder = SORT_ORDER_ASC
    // Reverse sort order if the list is already sorted on same field
@@ -149,18 +149,18 @@ const sortObjectsList = (list, sortBy, sortOrder) => {
  }
}

export const setSortBy = sortBy => ({
export const setSortBy = (sortBy) => ({
  type: SET_SORT_BY,
  sortBy
  sortBy,
})

export const setSortOrder = sortOrder => ({
export const setSortOrder = (sortOrder) => ({
  type: SET_SORT_ORDER,
  sortOrder
  sortOrder,
})

export const selectPrefix = prefix => {
  return function(dispatch, getState) {
export const selectPrefix = (prefix) => {
  return function (dispatch, getState) {
    dispatch(setCurrentPrefix(prefix))
    dispatch(fetchObjects())
    dispatch(resetCheckedList())
@@ -169,49 +169,49 @@ export const selectPrefix = prefix => {
  }
}

export const setCurrentPrefix = prefix => {
export const setCurrentPrefix = (prefix) => {
  return {
    type: SET_CURRENT_PREFIX,
    prefix
    prefix,
  }
}

export const setPrefixWritable = prefixWritable => ({
export const setPrefixWritable = (prefixWritable) => ({
  type: SET_PREFIX_WRITABLE,
  prefixWritable
  prefixWritable,
})

export const deleteObject = object => {
  return function(dispatch, getState) {
export const deleteObject = (object) => {
  return function (dispatch, getState) {
    const currentBucket = getCurrentBucket(getState())
    const currentPrefix = getCurrentPrefix(getState())
    const objectName = `${currentPrefix}${object}`
    return web
      .RemoveObject({
        bucketName: currentBucket,
        objects: [objectName]
        objects: [objectName],
      })
      .then(() => {
        dispatch(removeObject(object))
      })
      .catch(e => {
      .catch((e) => {
        dispatch(
          alertActions.set({
            type: "danger",
            message: e.message
            message: e.message,
          })
        )
      })
  }
}

export const removeObject = object => ({
export const removeObject = (object) => ({
  type: REMOVE,
  object
  object,
})

export const deleteCheckedObjects = () => {
  return function(dispatch, getState) {
  return function (dispatch, getState) {
    const checkedObjects = getCheckedList(getState())
    for (let i = 0; i < checkedObjects.length; i++) {
      dispatch(deleteObject(checkedObjects[i]))
@@ -221,33 +221,52 @@
  }
}

export const shareObject = (object, days, hours, minutes) => {
  return function(dispatch, getState) {
  return function (dispatch, getState) {
    const hasServerDomain = hasServerPublicDomain(getState())
    const currentBucket = getCurrentBucket(getState())
    const currentPrefix = getCurrentPrefix(getState())
    const objectName = `${currentPrefix}${object}`
    const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
    if (web.LoggedIn()) {
      return web
        .PresignedGet({
          host: location.host,
          bucket: currentBucket,
          object: objectName,
          expiry: expiry
        .GetBucketPolicy({ bucketName: currentBucket, prefix: currentPrefix })
        .catch(() => ({ policy: null }))
        .then(({ policy }) => {
          if (hasServerDomain && ['readonly', 'readwrite'].includes(policy)) {
            const domain = getServerInfo(getState()).info.domains[0]
            const url = `${domain}/${currentBucket}/${encodeURI(objectName)}`
            dispatch(showShareObject(object, url, false))
            dispatch(
              alertActions.set({
                type: "success",
                message: "Object shared."
              })
            )
          } else {
            return web
              .PresignedGet({
                host: location.host,
                bucket: currentBucket,
                object: objectName,
                expiry: expiry
              })
          }
        })
        .then(obj => {
        .then((obj) => {
          if (!obj) return
          dispatch(showShareObject(object, obj.url))
          dispatch(
            alertActions.set({
              type: "success",
              message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
              message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`,
            })
          )
        })
        .catch(err => {
        .catch((err) => {
          dispatch(
            alertActions.set({
              type: "danger",
              message: err.message
              message: err.message,
            })
          )
        })
@@ -265,29 +284,29 @@ export const shareObject = (object, days, hours, minutes) => {
      dispatch(
        alertActions.set({
          type: "success",
          message: `Object shared.`
          message: `Object shared.`,
        })
      )
    }
  }
}

export const showShareObject = (object, url) => ({
export const showShareObject = (object, url, showExpiryDate = true) => ({
  type: SET_SHARE_OBJECT,
  show: true,
  object,
  url
  url,
  showExpiryDate,
})

export const hideShareObject = (object, url) => ({
  type: SET_SHARE_OBJECT,
  show: false,
  object: "",
  url: ""
  url: "",
})

export const downloadObject = object => {
  return function(dispatch, getState) {
export const getObjectURL = (object, callback) => {
  return function (dispatch, getState) {
    const currentBucket = getCurrentBucket(getState())
    const currentPrefix = getCurrentPrefix(getState())
    const objectName = `${currentPrefix}${object}`
@@ -295,78 +314,119 @@ export const downloadObject = object => {
    if (web.LoggedIn()) {
      return web
        .CreateURLToken()
        .then(res => {
          const url = `${
            window.location.origin
          }${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${
            res.token
          }`
          window.location = url
        .then((res) => {
          const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
          callback(url)
        })
        .catch(err => {
        .catch((err) => {
          dispatch(
            alertActions.set({
              type: "danger",
              message: err.message
              message: err.message,
            })
          )
        })
    } else {
      const url = `${
        window.location.origin
      }${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
      const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
      callback(url)
    }
  }
}
export const downloadObject = (object) => {
  return function (dispatch, getState) {
    const currentBucket = getCurrentBucket(getState())
    const currentPrefix = getCurrentPrefix(getState())
    const objectName = `${currentPrefix}${object}`
    const encObjectName = encodeURI(objectName)
    if (web.LoggedIn()) {
      return web
        .CreateURLToken()
        .then((res) => {
          const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
          window.location = url
        })
        .catch((err) => {
          dispatch(
            alertActions.set({
              type: "danger",
              message: err.message,
            })
          )
        })
    } else {
      const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
      window.location = url
    }
  }
}

export const checkObject = object => ({
  type: CHECKED_LIST_ADD,
  object
})

export const uncheckObject = object => ({
  type: CHECKED_LIST_REMOVE,
  object
})

export const resetCheckedList = () => ({
  type: CHECKED_LIST_RESET
})

export const downloadCheckedObjects = () => {
  return function(dispatch, getState) {
    const state = getState()
    const req = {
      bucketName: getCurrentBucket(state),
      prefix: getCurrentPrefix(state),
      objects: getCheckedList(state)
    }
    if (!web.LoggedIn()) {
      const requestUrl = location.origin + "/minio/zip?token="
      downloadZip(requestUrl, req, dispatch)
    } else {
      return web
        .CreateURLToken()
        .then(res => {
          const requestUrl = `${
            location.origin
          }${minioBrowserPrefix}/zip?token=${res.token}`
          downloadZip(requestUrl, req, dispatch)
        })
        .catch(err =>
          dispatch(
            alertActions.set({
              type: "danger",
              message: err.message
            })
          )
        )
    }
export const downloadPrefix = (object) => {
  return function (dispatch, getState) {
    return downloadObjects(
      getCurrentBucket(getState()),
      getCurrentPrefix(getState()),
      [object],
      `${object.slice(0, -1)}.zip`,
      dispatch
    )
  }
}

const downloadZip = (url, req, dispatch) => {

export const checkObject = (object) => ({
  type: CHECKED_LIST_ADD,
  object,
})

export const uncheckObject = (object) => ({
  type: CHECKED_LIST_REMOVE,
  object,
})

export const resetCheckedList = () => ({
  type: CHECKED_LIST_RESET,
})

export const downloadCheckedObjects = () => {
  return function (dispatch, getState) {
    return downloadObjects(
      getCurrentBucket(getState()),
      getCurrentPrefix(getState()),
      getCheckedList(getState()),
      null,
      dispatch
    )
  }
}

const downloadObjects = (bucketName, prefix, objects, filename, dispatch) => {
  const req = {
    bucketName: bucketName,
    prefix: prefix,
    objects: objects,
  }
  if (web.LoggedIn()) {
    return web
      .CreateURLToken()
      .then((res) => {
        const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
        downloadZip(requestUrl, req, filename, dispatch)
      })
      .catch((err) =>
        dispatch(
          alertActions.set({
            type: "danger",
            message: err.message,
          })
        )
      )
  } else {
    const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=`
    downloadZip(requestUrl, req, filename, dispatch)
  }
}

const downloadZip = (url, req, filename, dispatch) => {
  var anchor = document.createElement("a")
  document.body.appendChild(anchor)

@@ -374,17 +434,17 @@ const downloadZip = (url, req, dispatch) => {
  xhr.open("POST", url, true)
  xhr.responseType = "blob"

  xhr.onload = function(e) {
  xhr.onload = function (e) {
    if (this.status == 200) {
      dispatch(resetCheckedList())
      var blob = new Blob([this.response], {
        type: "octet/stream"
        type: "octet/stream",
      })
      var blobUrl = window.URL.createObjectURL(blob)
      var separator = req.prefix.length > 1 ? "-" : ""

      anchor.href = blobUrl
      anchor.download =
      anchor.download = filename ||
        req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"

      anchor.click()
@@ -89,7 +89,8 @@ export default (
       shareObject: {
         show: action.show,
         object: action.object,
-        url: action.url
+        url: action.url,
+        showExpiryDate: action.showExpiryDate
       }
     }
     case actionsObjects.CHECKED_LIST_ADD:
@@ -48,18 +48,29 @@ export class Dropzone extends React.Component {
     const rejectStyle = {
       backgroundColor: "#ffdddd"
     }
+    const getStyle = (isDragActive, isDragAccept, isDragReject) => ({
+      ...style,
+      ...(isDragActive ? activeStyle : {}),
+      ...(isDragReject ? rejectStyle : {})
+    })

     // disableClick means that it won't trigger a file upload box when
     // the user clicks on a file.
     return (
       <ReactDropzone
-        style={style}
-        activeStyle={activeStyle}
-        rejectStyle={rejectStyle}
-        disableClick={true}
         onDrop={this.onDrop.bind(this)}
       >
-        {this.props.children}
+        {({getRootProps, getInputProps, isDragActive, isDragAccept, isDragReject}) => (
+          <div
+            {...getRootProps({
+              onClick: event => event.stopPropagation()
+            })}
+            style={getStyle(isDragActive, isDragAccept, isDragReject)}
+          >
+            <input {...getInputProps()} />
+            {this.props.children}
+          </div>
+        )}
       </ReactDropzone>
     )
   }
@@ -89,11 +89,16 @@ export const uploadFile = file => {
     return
   }
   const currentPrefix = getCurrentPrefix(state)
-  const objectName = `${currentPrefix}${file.name}`
+  var _filePath = file.path || file.name
+  if (_filePath.charAt(0) == '/') {
+    _filePath = _filePath.substring(1)
+  }
+  const filePath = _filePath
+  const objectName = `${currentPrefix}${filePath}`
   const uploadUrl = `${
     window.location.origin
   }${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`
-  const slug = `${currentBucket}-${currentPrefix}-${file.name}`
+  const slug = `${currentBucket}-${currentPrefix}-${filePath}`

   let xhr = new XMLHttpRequest()
   xhr.open("PUT", uploadUrl, true)
@@ -141,7 +146,7 @@ export const uploadFile = file => {
       dispatch(
         alertActions.set({
           type: "success",
-          message: "File '" + file.name + "' uploaded successfully."
+          message: "File '" + filePath + "' uploaded successfully."
         })
       )
       dispatch(objectsActions.selectPrefix(currentPrefix))
@@ -153,7 +158,7 @@ export const uploadFile = file => {
       dispatch(
         alertActions.set({
          type: "danger",
-          message: "Error occurred uploading '" + file.name + "'."
+          message: "Error occurred uploading '" + filePath + "'."
         })
       )
     })
@@ -105,7 +105,7 @@ div.fesl-row {

   .fesl-item-name {
     a {
-      cursor: default;
+      cursor: pointer;
     }
   }
@@ -114,12 +114,6 @@ div.fesl-row {
   ----------------------------*/
   &[data-type=folder] {
     .list-type(#a1d6dd, '\f07b');
-
-    .fesl-item-name {
-      a {
-        cursor: pointer;
-      }
-    }
   }
   &[data-type=pdf] {.list-type(#fa7775, '\f1c1'); }
   &[data-type=zip] { .list-type(#427089, '\f1c6'); }
10690 browser/package-lock.json (generated)
File diff suppressed because it is too large
@@ -28,71 +28,70 @@
   },
   "homepage": "https://github.com/minio/minio",
   "devDependencies": {
-    "async": "^1.5.2",
+    "async": "^3.2.0",
     "babel-cli": "^6.26.0",
     "babel-core": "^6.26.3",
-    "babel-jest": "^22.1.0",
+    "babel-jest": "^23.6.0",
     "babel-loader": "^7.1.2",
     "babel-plugin-syntax-object-rest-spread": "^6.13.0",
-    "babel-plugin-transform-object-rest-spread": "^6.8.0",
-    "babel-polyfill": "^6.23.0",
+    "babel-plugin-transform-object-rest-spread": "^6.26.0",
+    "babel-polyfill": "^6.26.0",
     "babel-preset-es2015": "^6.14.0",
     "babel-preset-react": "^6.11.1",
     "babel-register": "^6.26.0",
-    "copy-webpack-plugin": "^4.6.0",
-    "css-loader": "^0.23.1",
-    "enzyme": "^3.10.0",
-    "enzyme-adapter-react-16": "^1.1.1",
-    "esformatter": "^0.10.0",
-    "esformatter-jsx": "^7.4.1",
+    "copy-webpack-plugin": "^6.0.1",
+    "css-loader": "^3.5.3",
+    "enzyme": "^3.11.0",
+    "enzyme-adapter-react-16": "^1.15.2",
+    "esformatter": "^0.11.3",
+    "esformatter-jsx": "^8.0.1",
     "esformatter-jsx-ignore": "^1.0.6",
-    "html-webpack-plugin": "^3.2.0",
-    "jest": "^22.1.4",
-    "jest-enzyme": "^4.0.2",
-    "json-loader": "^0.5.4",
-    "less": "^3.9.0",
-    "less-loader": "^4.1.0",
-    "purgecss-webpack-plugin": "^1.4.0",
-    "style-loader": "^0.13.1",
-    "url-loader": "^0.5.7",
-    "webpack-cli": "^3.2.0",
-    "webpack-dev-server": "^3.1.14"
+    "html-webpack-plugin": "^4.3.0",
+    "jest": "^23.6.0",
+    "jest-enzyme": "^7.1.2",
+    "json-loader": "^0.5.7",
+    "less": "^3.11.1",
+    "less-loader": "^6.1.0",
+    "purgecss-webpack-plugin": "^2.2.0",
+    "style-loader": "^1.2.1",
+    "url-loader": "^4.1.0",
+    "webpack-cli": "^3.3.11",
+    "webpack-dev-server": "^3.11.0"
   },
   "dependencies": {
-    "@fortawesome/fontawesome-free": "^5.10.0",
+    "@fortawesome/fontawesome-free": "^5.13.0",
     "bootstrap": "^3.4.1",
-    "classnames": "^2.2.3",
-    "core-js": "^3.2.1",
-    "expect": "^1.20.2",
-    "glob-all": "^3.1.0",
-    "history": "^4.7.2",
+    "classnames": "^2.2.6",
+    "core-js": "^3.6.5",
+    "expect": "^26.0.1",
+    "glob-all": "^3.2.1",
+    "history": "^4.10.1",
     "humanize": "0.0.9",
     "identity-obj-proxy": "^3.0.0",
     "json-loader": "^0.5.4",
     "jwt-decode": "^2.2.0",
-    "local-storage-fallback": "^4.0.2",
+    "local-storage-fallback": "^4.1.1",
     "material-design-iconic-font": "^2.2.0",
-    "mime-db": "^1.25.0",
-    "mime-types": "^2.1.13",
-    "moment": "^2.24.0",
-    "query-string": "^6.8.2",
-    "react": "^16.2.0",
-    "react-addons-test-utils": "^0.14.8",
-    "react-bootstrap": "^0.32.0",
-    "react-copy-to-clipboard": "^5.0.1",
+    "mime-db": "^1.44.0",
+    "mime-types": "^2.1.27",
+    "moment": "^2.26.0",
+    "query-string": "^6.12.1",
+    "react": "^16.13.1",
+    "react-addons-test-utils": "^15.6.2",
+    "react-bootstrap": "^0.32.4",
+    "react-copy-to-clipboard": "^5.0.2",
     "react-custom-scrollbars": "^4.2.1",
-    "react-dom": "^16.2.0",
-    "react-dropzone": "^4.2.3",
-    "react-infinite-scroller": "^1.0.6",
+    "react-dom": "^16.13.1",
+    "react-dropzone": "^11.0.1",
+    "react-infinite-scroller": "^1.2.4",
    "react-onclickout": "^2.0.8",
-    "react-redux": "^5.0.6",
-    "react-router-dom": "^4.2.0",
-    "redux": "^3.7.2",
-    "redux-mock-store": "^1.5.1",
-    "redux-thunk": "^2.2.0",
-    "reselect": "^3.0.1",
-    "superagent": "^3.8.2",
+    "react-redux": "^5.1.2",
+    "react-router-dom": "^5.2.0",
+    "redux": "^4.0.5",
+    "redux-mock-store": "^1.5.4",
+    "redux-thunk": "^2.3.0",
+    "reselect": "^4.0.0",
+    "superagent": "^5.2.2",
     "superagent-es6-promise": "^1.0.0",
-    "webpack": "^4.28.3"
+    "webpack": "^4.43.0"
   }
 }
1 browser/staticcheck.conf (new file)
@@ -0,0 +1 @@
+checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]

File diff suppressed because one or more lines are too long
@@ -73,17 +73,17 @@ var exports = {
   },
   proxy: {
     '/minio/webrpc': {
-      target: 'http://localhost:9000',
-      secure: false,
-      headers: {'Host': "localhost:9000"}
+      target: 'http://localhost:9000',
+      secure: false,
+      headers: {'Host': "localhost:9000"}
     },
     '/minio/upload/*': {
-      target: 'http://localhost:9000',
-      secure: false
+      target: 'http://localhost:9000',
+      secure: false
     },
     '/minio/download/*': {
-      target: 'http://localhost:9000',
-      secure: false
+      target: 'http://localhost:9000',
+      secure: false
     },
     '/minio/zip': {
       target: 'http://localhost:9000',
@@ -92,7 +92,7 @@ var exports = {
     }
   },
   plugins: [
-    new CopyWebpackPlugin([
+    new CopyWebpackPlugin({patterns: [
      {from: 'app/css/loader.css'},
      {from: 'app/img/browsers/chrome.png'},
      {from: 'app/img/browsers/firefox.png'},
@@ -102,7 +102,7 @@ var exports = {
      {from: 'app/img/favicon/favicon-32x32.png'},
      {from: 'app/img/favicon/favicon-96x96.png'},
      {from: 'app/index.html'}
-    ]),
+    ]}),
    new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
    new PurgecssPlugin({
      paths: glob.sync([
@@ -67,7 +67,7 @@ var exports = {
     fs:'empty'
   },
   plugins: [
-    new CopyWebpackPlugin([
+    new CopyWebpackPlugin({patterns: [
      {from: 'app/css/loader.css'},
      {from: 'app/img/browsers/chrome.png'},
      {from: 'app/img/browsers/firefox.png'},
@@ -77,7 +77,7 @@ var exports = {
      {from: 'app/img/favicon/favicon-32x32.png'},
      {from: 'app/img/favicon/favicon-96x96.png'},
      {from: 'app/index.html'}
-    ]),
+    ]}),
    new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
    new PurgecssPlugin({
      paths: glob.sync([
@@ -9,7 +9,7 @@ function _init() {
     export CGO_ENABLED=0

     ## List of architectures and OS to test cross compilation.
-    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
+    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
 }

 function _build() {
@@ -20,11 +20,11 @@ function _build() {
     package=$(go list -f '{{.ImportPath}}')
     printf -- "--> %15s:%s\n" "${osarch}" "${package}"

-    # Go build to build the binary.
+    # go build -trimpath to build the binary.
     export GOOS=$os
     export GOARCH=$arch
     export GO111MODULE=on
-    go build -tags kqueue -o /dev/null
+    go build -trimpath -tags kqueue -o /dev/null
 }

 function main() {
@@ -46,7 +46,10 @@ function main()
   gw_pid="$(start_minio_gateway_s3)"

   SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
-      SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh
+      SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
+      aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
+      healthcheck mc minio-dotnet minio-js \
+      minio-py s3cmd s3select security
   rv=$?

   kill "$sr_pid"
@@ -3,5 +3,5 @@
 set -e

 for d in $(go list ./... | grep -v browser); do
-    CGO_ENABLED=1 go test -v -race --timeout 20m "$d"
+    CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
 done
@@ -154,34 +154,6 @@ function run_test_erasure_sets() {
   return "$rv"
 }

-function run_test_dist_erasure_sets_ipv6()
-{
-  minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
-
-  export SERVER_ENDPOINT="[::1]:9000"
-
-  (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
-  rv=$?
-
-  for pid in "${minio_pids[@]}"; do
-    kill "$pid"
-  done
-  sleep 3
-
-  if [ "$rv" -ne 0 ]; then
-    for i in $(seq 0 9); do
-      echo "server$i log:"
-      cat "$WORK_DIR/dist-minio-v6-900$i.log"
-    done
-  fi
-
-  for i in $(seq 0 9); do
-    rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
-  done
-
-  return "$rv"
-}
-
 function run_test_zone_erasure_sets()
 {
   minio_pids=( $(start_minio_zone_erasure_sets) )
@@ -30,33 +30,50 @@ MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

 function start_minio_3_node() {
-  declare -a minio_pids
   declare -a ARGS
   export MINIO_ACCESS_KEY=minio
   export MINIO_SECRET_KEY=minio123
   export MINIO_ERASURE_SET_DRIVE_COUNT=6

+  start_port=$(shuf -i 10000-65000 -n 1)
   for i in $(seq 1 3); do
-    ARGS+=("http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/6/")
+    ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
   done

-  "${MINIO[@]}" --address ":8001" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8001.log" 2>&1 &
+  "${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
   minio_pids[0]=$!
+  disown "${minio_pids[0]}"

-  "${MINIO[@]}" --address ":8002" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8002.log" 2>&1 &
+  "${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
   minio_pids[1]=$!
+  disown "${minio_pids[1]}"

-  "${MINIO[@]}" --address ":8003" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8003.log" 2>&1 &
+  "${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
   minio_pids[2]=$!
+  disown "${minio_pids[2]}"

   sleep "$1"
-  echo "${minio_pids[@]}"
+  for pid in "${minio_pids[@]}"; do
+    if ! kill "$pid"; then
+      for i in $(seq 1 3); do
+        echo "server$i log:"
+        cat "${WORK_DIR}/dist-minio-server$i.log"
+      done
+      echo "FAILED"
+      purge "$WORK_DIR"
+      exit 1
+    fi
+    # forcibly killing, to proceed further properly.
+    kill -9 "$pid"
+    sleep 1 # wait 1sec per pid
+  done
 }

 function check_online() {
-  for i in $(seq 1 3); do
-    if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-$[8000+$i].log; then
-      echo "1"
-    fi
-  done
+  if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-*.log; then
+    echo "1"
+  fi
 }
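Side note on the hunk above: replacing the fixed 8001-8003 ports with a shuf-picked start_port avoids address collisions when several CI jobs share one host. The Go-flavored version of the same trick, binding port 0 so the kernel picks a free port, is sketched below; it illustrates the idea and is not code from this changeset.

package main

import (
	"fmt"
	"net"
)

// freePort binds to port 0 so the kernel assigns an unused TCP port,
// the same collision-avoidance idea as the script's random start_port.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("picked port:", port)
}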
 function purge()
@@ -75,50 +92,21 @@ function __init__()
 }

 function perform_test() {
-  minio_pids=( $(start_minio_3_node 60) )
-  for pid in "${minio_pids[@]}"; do
-    if ! kill "$pid"; then
-      for i in $(seq 1 3); do
-        echo "server$i log:"
-        cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
-      done
-      echo "FAILED"
-      purge "$WORK_DIR"
-      exit 1
-    fi
-    # forcibly killing, to proceed further properly.
-    kill -9 "$pid"
-  done
+  start_minio_3_node 60

   echo "Testing Distributed Erasure setup healing of drives"
   echo "Remove the contents of the disks belonging to '${1}' erasure set"

   rm -rf ${WORK_DIR}/${1}/*/

-  minio_pids=( $(start_minio_3_node 60) )
-  for pid in "${minio_pids[@]}"; do
-    if ! kill "$pid"; then
-      for i in $(seq 1 3); do
-        echo "server$i log:"
-        cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
-      done
-      echo "FAILED"
-      purge "$WORK_DIR"
-      exit 1
-    fi
-    # forcibly killing, to proceed further properly.
-    # if the previous kill is taking time.
-    kill -9 "$pid"
-  done
+  start_minio_3_node 60

   rv=$(check_online)
   if [ "$rv" == "1" ]; then
-    for pid in "${minio_pids[@]}"; do
-      kill -9 "$pid"
-    done
+    pkill -9 minio
     for i in $(seq 1 3); do
       echo "server$i log:"
-      cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
+      cat "${WORK_DIR}/dist-minio-server$i.log"
     done
     echo "FAILED"
     purge "$WORK_DIR"
@@ -100,18 +100,18 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
     }

     if len(acl.AccessControlList.Grants) == 0 {
-        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
         return
     }

     if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
-        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
         return
     }
 }

 if aclHeader != "" && aclHeader != "private" {
-    writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+    writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
     return
 }

@@ -159,6 +159,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
     },
     Permission: "FULL_CONTROL",
 })
+
 if err := xml.NewEncoder(w).Encode(acl); err != nil {
     writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
     return
@@ -214,18 +215,18 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
     }

     if len(acl.AccessControlList.Grants) == 0 {
-        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
         return
     }

     if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
-        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
         return
     }
 }

 if aclHeader != "" && aclHeader != "private" {
-    writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+    writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
     return
 }
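The common thread in the ACL hunks above is an error-code change: any ACL other than the implicit private/FULL_CONTROL one is now rejected with NotImplemented instead of errInvalidArgument, which is more honest to S3 clients probing for ACL support. A condensed, self-contained sketch of the guard logic (simplified types, not MinIO's actual handler signature):

package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("NotImplemented")

// checkACL condenses the guard from PutBucketACLHandler/PutObjectACLHandler:
// a canned ACL header other than "private", or a grant list that is not a
// single FULL_CONTROL grant, is rejected as not implemented.
func checkACL(permissions []string, aclHeader string) error {
	if aclHeader == "" {
		if len(permissions) == 0 || permissions[0] != "FULL_CONTROL" {
			return errNotImplemented
		}
	} else if aclHeader != "private" {
		return errNotImplemented
	}
	return nil
}

func main() {
	fmt.Println(checkACL(nil, "public-read"))           // NotImplemented
	fmt.Println(checkACL([]string{"FULL_CONTROL"}, "")) // <nil>
}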
@@ -35,50 +35,47 @@ import (
     "github.com/minio/minio/cmd/config/storageclass"
     "github.com/minio/minio/cmd/crypto"
     "github.com/minio/minio/cmd/logger"
+    "github.com/minio/minio/pkg/auth"
     iampolicy "github.com/minio/minio/pkg/iam/policy"
     "github.com/minio/minio/pkg/madmin"
 )

-func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
+func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
     // Get current object layer instance.
     objectAPI := newObjectLayerWithoutSafeModeFn()
     if objectAPI == nil {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
-        return nil
+        return auth.Credentials{}, nil
     }

     // Validate request signature.
-    _, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
+    cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
     if adminAPIErr != ErrNone {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-        return nil
+        return cred, nil
     }

-    return objectAPI
+    return cred, objectAPI
 }
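The signature change above sets the pattern repeated through the rest of this file: the validator hands back the authenticated credentials next to the object layer, and each handler then keys its payload on cred.SecretKey instead of the process-global globalActiveCred.SecretKey, so responses are sealed for the actual requester. A minimal standalone sketch of the shape (illustrative names, not MinIO's real types):

package main

import "fmt"

// credentials stands in for auth.Credentials.
type credentials struct{ AccessKey, SecretKey string }

// objectLayer stands in for ObjectLayer.
type objectLayer struct{}

// validateReq mirrors the new validateAdminReqConfigKV shape: it returns
// the caller's credentials together with the resource handle, so handlers
// no longer need a global root credential to key their responses.
func validateReq(ok bool) (credentials, *objectLayer) {
	if !ok {
		return credentials{}, nil
	}
	return credentials{AccessKey: "user1", SecretKey: "user1-secret"}, &objectLayer{}
}

func main() {
	cred, objAPI := validateReq(true)
	if objAPI == nil {
		return
	}
	password := cred.SecretKey // mirrors `password := cred.SecretKey` in the diff
	fmt.Println("response would be encrypted with the requester's key:", password)
}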

-// DelConfigKVHandler - DELETE /minio/admin/v2/del-config-kv
+// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
 func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "DelConfigKVHandler")
+    ctx := newContext(r, w, "DeleteConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
         // More than maxConfigSize bytes were available
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
     if err != nil {
         logger.LogIf(ctx, err, logger.Application)
@@ -103,28 +100,24 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
     }
 }

-// SetConfigKVHandler - PUT /minio/admin/v2/set-config-kv
+// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
 func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "SetConfigKVHandler")
+    ctx := newContext(r, w, "SetConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
         // More than maxConfigSize bytes were available
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
     if err != nil {
         logger.LogIf(ctx, err, logger.Application)
@@ -162,23 +155,25 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ

     // Make sure to write backend is encrypted
     if globalConfigEncrypted {
-        saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
+        saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
     }

     writeSuccessResponseHeadersOnly(w)
 }

-// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}
+// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
 func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "GetConfigKVHandler")
+    ctx := newContext(r, w, "GetConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

     cfg := globalServerConfig
-    if globalSafeMode {
+    if newObjectLayerFn() == nil {
         var err error
         cfg, err = getValidConfig(objectAPI)
         if err != nil {
@@ -195,7 +190,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     econfigData, err := madmin.EncryptData(password, buf.Bytes())
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -206,9 +201,11 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 }

 func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "ClearConfigHistoryKVHandler")
+    ctx := newContext(r, w, "ClearConfigHistoryKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
+
+    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -241,9 +238,11 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *

 // RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
 func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")
+    ctx := newContext(r, w, "RestoreConfigHistoryKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
+
+    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -287,9 +286,11 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r

 // ListConfigHistoryKVHandler - lists all the KV ids.
 func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "ListConfigHistoryKVHandler")
+    ctx := newContext(r, w, "ListConfigHistoryKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -313,7 +314,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     econfigData, err := madmin.EncryptData(password, data)
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -323,11 +324,13 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
     writeSuccessResponseJSON(w, econfigData)
 }

-// HelpConfigKVHandler - GET /minio/admin/v2/help-config-kv?subSys={subSys}&key={key}
+// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
 func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "HelpConfigKVHandler")
+    ctx := newContext(r, w, "HelpConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
+
+    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -349,28 +352,24 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
     w.(http.Flusher).Flush()
 }

-// SetConfigHandler - PUT /minio/admin/v2/config
+// SetConfigHandler - PUT /minio/admin/v3/config
 func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "SetConfigHandler")
+    ctx := newContext(r, w, "SetConfig")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
         // More than maxConfigSize bytes were available
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
     if err != nil {
         logger.LogIf(ctx, err, logger.Application)
@@ -403,18 +402,20 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques

     // Make sure to write backend is encrypted
     if globalConfigEncrypted {
-        saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
+        saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
     }

     writeSuccessResponseHeadersOnly(w)
 }

-// GetConfigHandler - GET /minio/admin/v2/config
+// GetConfigHandler - GET /minio/admin/v3/config
 // Get config.json of this minio setup.
 func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "GetConfigHandler")
+    ctx := newContext(r, w, "GetConfig")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -471,7 +472,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
     }
 }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     econfigData, err := madmin.EncryptData(password, []byte(s.String()))
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -51,21 +51,17 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
     return objectAPI, cred
 }

-// RemoveUser - DELETE /minio/admin/v2/remove-user?accessKey=<access_key>
+// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
 func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "RemoveUser")

+    defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     vars := mux.Vars(r)
     accessKey := vars["accessKey"]

@@ -93,15 +89,19 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
     }
 }

-// ListUsers - GET /minio/admin/v2/list-users
+// ListUsers - GET /minio/admin/v3/list-users
 func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "ListUsers")

-    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
+    defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
+
+    objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
     if objectAPI == nil {
         return
     }

+    password := cred.SecretKey
+
     allCredentials, err := globalIAMSys.ListUsers()
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -114,7 +114,6 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
         return
     }

-    password := globalActiveCred.SecretKey
     econfigData, err := madmin.EncryptData(password, data)
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -124,10 +123,12 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, econfigData)
 }

-// GetUserInfo - GET /minio/admin/v2/user-info
+// GetUserInfo - GET /minio/admin/v3/user-info
 func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "GetUserInfo")

+    defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
     if objectAPI == nil {
         return
@@ -151,10 +152,12 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, data)
 }

-// UpdateGroupMembers - PUT /minio/admin/v2/update-group-members
+// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
 func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "UpdateGroupMembers")

+    defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
     if objectAPI == nil {
         return
@@ -194,10 +197,12 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
     }
 }

-// GetGroup - /minio/admin/v2/group?group=mygroup1
+// GetGroup - /minio/admin/v3/group?group=mygroup1
 func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "GetGroup")

+    defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
     if objectAPI == nil {
         return
@@ -221,10 +226,12 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, body)
 }

-// ListGroups - GET /minio/admin/v2/groups
+// ListGroups - GET /minio/admin/v3/groups
 func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "ListGroups")

+    defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
     if objectAPI == nil {
         return
@@ -245,10 +252,12 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, body)
 }

-// SetGroupStatus - PUT /minio/admin/v2/set-group-status?group=mygroup1&status=enabled
+// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
 func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "SetGroupStatus")

+    defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
     if objectAPI == nil {
         return
@@ -280,21 +289,17 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
     }
 }

-// SetUserStatus - PUT /minio/admin/v2/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
+// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
 func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "SetUserStatus")

+    defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     vars := mux.Vars(r)
     accessKey := vars["accessKey"]
     status := vars["status"]
@@ -319,21 +324,17 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
     }
 }

-// AddUser - PUT /minio/admin/v2/add-user?accessKey=<access_key>
+// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
 func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "AddUser")

+    defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
+
     objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     vars := mux.Vars(r)
     accessKey := vars["accessKey"]
@@ -378,16 +379,323 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
     }
 }

-// InfoCannedPolicy - GET /minio/admin/v2/info-canned-policy?name={policyName}
-func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "InfoCannedPolicy")
+// AddServiceAccount - PUT /minio/admin/v3/add-service-account
+func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "AddServiceAccount")
+
+    defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
+
+    // Get current object layer instance.
+    objectAPI := newObjectLayerWithoutSafeModeFn()
+    if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+        return
+    }
+
+    cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+    if s3Err != ErrNone {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+        return
+    }
+
+    password := cred.SecretKey
+    reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
+        return
+    }
+
+    var createReq madmin.AddServiceAccountReq
+    if err = json.Unmarshal(reqBytes, &createReq); err != nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
+        return
+    }
+
+    // Disallow creating service accounts by root user.
+    if owner {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+        return
+    }
+
+    parentUser := cred.AccessKey
+    if cred.ParentUser != "" {
+        parentUser = cred.ParentUser
+    }
+
+    newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    // Notify all other Minio peers to reload user the service account
+    for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
+        if nerr.Err != nil {
+            logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
+            logger.LogIf(ctx, nerr.Err)
+        }
+    }
+
+    var createResp = madmin.AddServiceAccountResp{
+        Credentials: auth.Credentials{
+            AccessKey: newCred.AccessKey,
+            SecretKey: newCred.SecretKey,
+        },
+    }
+
+    data, err := json.Marshal(createResp)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    encryptedData, err := madmin.EncryptData(password, data)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    writeSuccessResponseJSON(w, encryptedData)
+}
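AddServiceAccount above decrypts the request with madmin.DecryptData and seals the generated credentials with madmin.EncryptData, both keyed on the caller's secret key. A tiny round-trip sketch using just those two helpers, with a made-up payload and secret (the import path is the one visible in this diff's import block):

package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	secret := "caller-secret-key" // hypothetical requester secret key
	payload := []byte(`{"policy":null}`)

	// Seal the payload the way a client seals AddServiceAccountReq.
	sealed, err := madmin.EncryptData(secret, payload)
	if err != nil {
		panic(err)
	}

	// Unseal it the way the handler reads the request body.
	plain, err := madmin.DecryptData(secret, bytes.NewReader(sealed))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(plain, payload)) // true
}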
+// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
+func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "ListServiceAccounts")
+
+    defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
+
+    // Get current object layer instance.
+    objectAPI := newObjectLayerWithoutSafeModeFn()
+    if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+        return
+    }
+
+    cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+    if s3Err != ErrNone {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+        return
+    }
+
+    // Disallow creating service accounts by root user.
+    if owner {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+        return
+    }
+
+    parentUser := cred.AccessKey
+    if cred.ParentUser != "" {
+        parentUser = cred.ParentUser
+    }
+
+    serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    var listResp = madmin.ListServiceAccountsResp{
+        Accounts: serviceAccounts,
+    }
+
+    data, err := json.Marshal(listResp)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    writeSuccessResponseJSON(w, encryptedData)
+}
+// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
+func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "DeleteServiceAccount")
+
+    defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
+
+    // Get current object layer instance.
+    objectAPI := newObjectLayerWithoutSafeModeFn()
+    if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+        return
+    }
+
+    cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+    if s3Err != ErrNone {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+        return
+    }
+
+    // Disallow creating service accounts by root user.
+    if owner {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+        return
+    }
+
+    serviceAccount := mux.Vars(r)["accessKey"]
+    if serviceAccount == "" {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
+        return
+    }
+
+    user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    parentUser := cred.AccessKey
+    if cred.ParentUser != "" {
+        parentUser = cred.ParentUser
+    }
+
+    if parentUser != user || user == "" {
+        // The service account belongs to another user but return not
+        // found error to mitigate brute force attacks. or the
+        // serviceAccount doesn't exist.
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
+        return
+    }
+
+    err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    writeSuccessNoContent(w)
+}
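Worth calling out in DeleteServiceAccount: when the account exists but belongs to a different parent user, the handler deliberately answers with the same ErrServiceAccountNotFound it uses for missing accounts, so callers cannot enumerate other users' access keys. A compact sketch of that guard (illustrative names only):

package main

import (
	"errors"
	"fmt"
)

var errServiceAccountNotFound = errors.New("service account not found")

// deleteServiceAccount hides ownership mismatches behind the same
// not-found error used for genuinely missing accounts.
func deleteServiceAccount(requester, accountParent string) error {
	if requester != accountParent || accountParent == "" {
		return errServiceAccountNotFound // indistinguishable from "does not exist"
	}
	return nil // the real handler deletes the account here
}

func main() {
	fmt.Println(deleteServiceAccount("alice", "bob"))   // service account not found
	fmt.Println(deleteServiceAccount("alice", "alice")) // <nil>
}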
+// AccountUsageInfoHandler returns usage
+func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "AccountUsageInfo")
+
+    defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
+
+    // Get current object layer instance.
+    objectAPI := newObjectLayerWithoutSafeModeFn()
+    if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+        return
+    }
+
+    cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
+    if s3Err != ErrNone {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+        return
+    }
+
+    // Set prefix value for "s3:prefix" policy conditionals.
+    r.Header.Set("prefix", "")
+
+    // Set delimiter value for "s3:delimiter" policy conditionals.
+    r.Header.Set("delimiter", SlashSeparator)
+
+    isAllowedAccess := func(bucketName string) (rd, wr bool) {
+        // Use the following trick to filter in place
+        // https://github.com/golang/go/wiki/SliceTricks#filter-in-place
+        if globalIAMSys.IsAllowed(iampolicy.Args{
+            AccountName:     cred.AccessKey,
+            Action:          iampolicy.ListBucketAction,
+            BucketName:      bucketName,
+            ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+            IsOwner:         owner,
+            ObjectName:      "",
+            Claims:          claims,
+        }) {
+            rd = true
+        }
+
+        if globalIAMSys.IsAllowed(iampolicy.Args{
+            AccountName:     cred.AccessKey,
+            Action:          iampolicy.PutObjectAction,
+            BucketName:      bucketName,
+            ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+            IsOwner:         owner,
+            ObjectName:      "",
+            Claims:          claims,
+        }) {
+            wr = true
+        }
+
+        return rd, wr
+    }
+
+    buckets, err := objectAPI.ListBuckets(ctx)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    // Load the latest calculated data usage
+    dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
+    if err != nil {
+        // log the error, continue with the accounting response
+        logger.LogIf(ctx, err)
+    }
+
+    accountName := cred.AccessKey
+    if cred.ParentUser != "" {
+        accountName = cred.ParentUser
+    }
+
+    acctInfo := madmin.AccountUsageInfo{
+        AccountName: accountName,
+    }
+
+    for _, bucket := range buckets {
+        rd, wr := isAllowedAccess(bucket.Name)
+        if rd || wr {
+            var size uint64
+            // Fetch the data usage of the current bucket
+            if !dataUsageInfo.LastUpdate.IsZero() {
+                size = dataUsageInfo.BucketsUsage[bucket.Name].Size
+            }
+            acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
+                Name:    bucket.Name,
+                Created: bucket.Created,
+                Size:    size,
+                Access: madmin.AccountAccess{
+                    Read:  rd,
+                    Write: wr,
+                },
+            })
+        }
+    }
+
+    usageInfoJSON, err := json.Marshal(acctInfo)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    writeSuccessResponseJSON(w, usageInfoJSON)
+}
+// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
+func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "InfoCannedPolicyV2")
+
+    defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
     if objectAPI == nil {
         return
     }

-    data, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
+    policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
         return
     }

+    data, err := json.Marshal(policy)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
@@ -397,9 +705,32 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
     w.(http.Flusher).Flush()
 }

-// ListCannedPolicies - GET /minio/admin/v2/list-canned-policies
-func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "ListCannedPolicies")
+// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
+func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "InfoCannedPolicy")
+
+    defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
+
+    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
+    if objectAPI == nil {
+        return
+    }
+
+    policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    json.NewEncoder(w).Encode(policy)
+    w.(http.Flusher).Flush()
+}
+
+// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
+func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "ListCannedPoliciesV2")
+
+    defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))

     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
     if objectAPI == nil {
@@ -412,7 +743,16 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
         return
     }

-    if err = json.NewEncoder(w).Encode(policies); err != nil {
+    policyMap := make(map[string][]byte, len(policies))
+    for k, p := range policies {
+        var err error
+        policyMap[k], err = json.Marshal(p)
+        if err != nil {
+            logger.LogIf(ctx, err)
+            continue
+        }
+    }
+    if err = json.NewEncoder(w).Encode(policyMap); err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
         return
     }
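The policyMap change above pre-marshals every policy before the streaming Encode, so one bad entry is skipped and logged instead of truncating an already-started JSON response. A self-contained illustration of why validating first matters (the "broken" entry is contrived):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	policies := map[string]interface{}{
		"readonly": map[string]string{"Version": "2012-10-17"},
		"broken":   func() {}, // funcs cannot be marshaled to JSON
	}

	// Validate each entry up front, mirroring the policyMap loop above;
	// a bad policy is skipped rather than aborting the whole response.
	valid := make(map[string]json.RawMessage, len(policies))
	for name, p := range policies {
		b, err := json.Marshal(p)
		if err != nil {
			fmt.Fprintf(os.Stderr, "skipping %s: %v\n", name, err)
			continue
		}
		valid[name] = b
	}
	json.NewEncoder(os.Stdout).Encode(valid)
}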
@@ -420,10 +760,46 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
     w.(http.Flusher).Flush()
 }

-// RemoveCannedPolicy - DELETE /minio/admin/v2/remove-canned-policy?name=<policy_name>
+// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
+func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "ListCannedPolicies")
+
+    defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
+
+    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
+    if objectAPI == nil {
+        return
+    }
+
+    policies, err := globalIAMSys.ListPolicies()
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    var newPolicies = make(map[string]iampolicy.Policy)
+    for name, p := range policies {
+        _, err = json.Marshal(p)
+        if err != nil {
+            logger.LogIf(ctx, err)
+            continue
+        }
+        newPolicies[name] = p
+    }
+    if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    w.(http.Flusher).Flush()
+}
+
+// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
 func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "RemoveCannedPolicy")

+    defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
     if objectAPI == nil {
         return
@@ -432,12 +808,6 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
     vars := mux.Vars(r)
     policyName := vars["name"]

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if err := globalIAMSys.DeletePolicy(policyName); err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
         return
@@ -452,10 +822,12 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
     }
 }

-// AddCannedPolicy - PUT /minio/admin/v2/add-canned-policy?name=<policy_name>
+// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
 func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "AddCannedPolicy")

+    defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
     if objectAPI == nil {
         return
@@ -464,12 +836,6 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
     vars := mux.Vars(r)
     policyName := vars["name"]

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     // Error out if Content-Length is missing.
     if r.ContentLength <= 0 {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
@@ -508,10 +874,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
     }
 }

-// SetPolicyForUserOrGroup - PUT /minio/admin/v2/set-policy?policy=xxx&user-or-group=?[&is-group]
+// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
 func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "SetPolicyForUserOrGroup")

+    defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
     if objectAPI == nil {
         return
@@ -522,15 +890,9 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
     entityName := vars["userOrGroup"]
     isGroup := vars["isGroup"] == "true"

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if !isGroup {
         ok, err := globalIAMSys.IsTempUser(entityName)
-        if err != nil {
+        if err != nil && err != errNoSuchUser {
             writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
             return
         }

File diff suppressed because it is too large
@@ -33,26 +33,27 @@ import (
	"github.com/minio/minio/pkg/madmin"
)

// adminXLTestBed - encapsulates subsystems that need to be set up for
// adminErasureTestBed - encapsulates subsystems that need to be set up for
// admin-handler unit tests.
type adminXLTestBed struct {
	xlDirs   []string
	objLayer ObjectLayer
	router   *mux.Router
type adminErasureTestBed struct {
	erasureDirs []string
	objLayer    ObjectLayer
	router      *mux.Router
}

// prepareAdminXLTestBed - helper function that sets up a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// prepareAdminErasureTestBed - helper function that sets up a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {

	// reset global variables to start afresh.
	resetTestGlobals()

	// Set globalIsXL to indicate that the setup uses an erasure
	// Set globalIsErasure to indicate that the setup uses an erasure
	// code backend.
	globalIsXL = true
	globalIsErasure = true

	// Initializing objectLayer for HealFormatHandler.
	objLayer, xlDirs, xlErr := initTestXLObjLayer()
	objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
	if xlErr != nil {
		return nil, xlErr
	}
@@ -65,58 +66,47 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
	// Initialize boot time
	globalBootTime = UTCNow()

	globalEndpoints = mustGetZoneEndpoints(xlDirs...)
	globalEndpoints = mustGetZoneEndpoints(erasureDirs...)

	globalConfigSys = NewConfigSys()
	newAllSubsystems()

	globalIAMSys = NewIAMSys()
	globalIAMSys.Init(objLayer)

	buckets, err := objLayer.ListBuckets(context.Background())
	if err != nil {
		return nil, err
	}

	globalPolicySys = NewPolicySys()
	globalPolicySys.Init(buckets, objLayer)

	globalNotificationSys = NewNotificationSys(globalEndpoints)
	globalNotificationSys.Init(buckets, objLayer)
	initAllSubsystems(ctx, objLayer)

	// Setup admin mgmt REST API handlers.
	adminRouter := mux.NewRouter()
	registerAdminRouter(adminRouter, true, true)

	return &adminXLTestBed{
		xlDirs:   xlDirs,
		objLayer: objLayer,
		router:   adminRouter,
	return &adminErasureTestBed{
		erasureDirs: erasureDirs,
		objLayer:    objLayer,
		router:      adminRouter,
	}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminXLTestBed) TearDown() {
	removeRoots(atb.xlDirs)
func (atb *adminErasureTestBed) TearDown() {
	removeRoots(atb.erasureDirs)
	resetTestGlobals()
}

// initTestObjLayer - Helper function to initialize an XL-based object
// initTestObjLayer - Helper function to initialize an Erasure-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer() (ObjectLayer, []string, error) {
	xlDirs, err := getRandomDisks(16)
func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
	erasureDirs, err := getRandomDisks(16)
	if err != nil {
		return nil, nil, err
	}
	endpoints := mustGetNewEndpoints(xlDirs...)
	format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
	endpoints := mustGetNewEndpoints(erasureDirs...)
	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
	if err != nil {
		removeRoots(xlDirs)
		removeRoots(erasureDirs)
		return nil, nil, err
	}

	globalPolicySys = NewPolicySys()
	objLayer, err := newXLSets(endpoints, format, 1, 16)
	objLayer := &erasureZones{zones: make([]*erasureSets, 1)}
	objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
	if err != nil {
		return nil, nil, err
	}
@@ -125,7 +115,7 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
	globalObjLayerMutex.Lock()
	globalObjectAPI = objLayer
	globalObjLayerMutex.Unlock()
	return objLayer, xlDirs, nil
	return objLayer, erasureDirs, nil
}

// cmdType - Represents different service subcommands like status, stop
@@ -191,9 +181,12 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
	adminTestBed, err := prepareAdminXLTestBed()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	adminTestBed, err := prepareAdminErasureTestBed(ctx)
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
		t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
	}
	defer adminTestBed.TearDown()

@@ -259,9 +252,12 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}

func TestAdminServerInfo(t *testing.T) {
	adminTestBed, err := prepareAdminXLTestBed()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	adminTestBed, err := prepareAdminErasureTestBed(ctx)
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
		t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
	}

	defer adminTestBed.TearDown()
@@ -303,7 +299,7 @@ func TestToAdminAPIErrCode(t *testing.T) {
	}{
		// 1. Server not in quorum.
		{
			err:            errXLWriteQuorum,
			err:            errErasureWriteQuorum,
			expectedAPIErr: ErrAdminConfigNoQuorum,
		},
		// 2. No error.
@@ -314,12 +310,12 @@ func TestToAdminAPIErrCode(t *testing.T) {
		// 3. Non-admin API specific error.
		{
			err:            errDiskNotFound,
			expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
			expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
		},
	}

	for i, test := range testCases {
		actualErr := toAdminAPIErrCode(context.Background(), test.err)
		actualErr := toAdminAPIErrCode(GlobalContext, test.err)
		if actualErr != test.expectedAPIErr {
			t.Errorf("Test %d: Expected %v but received %v",
				i+1, test.expectedAPIErr, actualErr)

@@ -21,7 +20,6 @@ import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"sync"
	"time"

@@ -60,9 +59,8 @@
)

var (
	errHealIdleTimeout      = fmt.Errorf("healing results were not consumed for too long")
	errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
	errHealStopSignalled    = fmt.Errorf("heal stop signaled")
	errHealIdleTimeout   = fmt.Errorf("healing results were not consumed for too long")
	errHealStopSignalled = fmt.Errorf("heal stop signaled")

	errFnHealFromAPIErr = func(ctx context.Context, err error) error {
		apiErr := toAPIError(ctx, err)
@@ -73,18 +71,11 @@

// healSequenceStatus - accumulated status of the heal sequence
type healSequenceStatus struct {
	// lock to update this structure as it is concurrently
	// accessed
	updateLock *sync.RWMutex

	// summary and detail for failures
	Summary       healStatusSummary `json:"Summary"`
	FailureDetail string            `json:"Detail,omitempty"`
	StartTime     time.Time         `json:"StartTime"`

	// disk information
	NumDisks int `json:"NumDisks"`

	// settings for the heal sequence
	HealSettings madmin.HealOpts `json:"Settings"`

@@ -100,25 +91,23 @@ type allHealState struct {
	healSeqMap map[string]*healSequence
}

// initHealState - initialize healing apparatus
func initHealState() *allHealState {
// newHealState - initialize global heal state management
func newHealState() *allHealState {
	healState := &allHealState{
		healSeqMap: make(map[string]*healSequence),
	}

	go healState.periodicHealSeqsClean()
	go healState.periodicHealSeqsClean(GlobalContext)

	return healState
}

func (ahs *allHealState) periodicHealSeqsClean() {
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
	// Launch clean-up routine to remove this heal sequence (after
	// it ends) from the global state after timeout has elapsed.
	ticker := time.NewTicker(time.Minute * 5)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
		case <-time.After(time.Minute * 5):
			now := UTCNow()
			ahs.Lock()
			for path, h := range ahs.healSeqMap {
@@ -127,7 +116,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
				}
			}
			ahs.Unlock()
		case <-GlobalServiceDoneCh:
		case <-ctx.Done():
			// server could be restarting - need
			// to exit immediately
			return
@@ -166,8 +155,13 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
			StartTime: UTCNow(),
		}
	} else {
		clientToken := he.clientToken
		if globalIsDistErasure {
			clientToken = fmt.Sprintf("%s@%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
		}

		hsp = madmin.HealStopSuccess{
			ClientToken:   he.clientToken,
			ClientToken:   clientToken,
			ClientAddress: he.clientAddress,
			StartTime:     he.startTime,
		}
@@ -183,7 +177,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
	}

	b, err := json.Marshal(&hsp)
	return b, toAdminAPIErr(context.Background(), err)
	return b, toAdminAPIErr(GlobalContext, err)
}

// LaunchNewHealSequence - launches a background routine that performs
@@ -200,11 +194,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
	respBytes []byte, apiErr APIError, errMsg string) {

	existsAndLive := false
	he, exists := ahs.getHealSequence(h.path)
	he, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
	if exists {
		if !he.hasEnded() || len(he.currentStatus.Items) > 0 {
			existsAndLive = true
		}
		existsAndLive = !he.hasEnded()
	}

	if existsAndLive {
@@ -229,9 +221,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (

	// Check if new heal sequence to be started overlaps with any
	// existing, running sequence
	hpath := pathJoin(h.bucket, h.object)
	for k, hSeq := range ahs.healSeqMap {
		if !hSeq.hasEnded() && (strings.HasPrefix(k, h.path) ||
			strings.HasPrefix(h.path, k)) {
		if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {

			errMsg = "The provided heal sequence path overlaps with an existing " +
				fmt.Sprintf("heal path: %s", k)
@@ -240,13 +232,18 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
	}

	// Add heal state and start sequence
	ahs.healSeqMap[h.path] = h
	ahs.healSeqMap[hpath] = h

	// Launch top-level background heal go-routine
	go h.healSequenceStart()

	clientToken := h.clientToken
	if globalIsDistErasure {
		clientToken = fmt.Sprintf("%s@%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
	}

	b, err := json.Marshal(madmin.HealStartSuccess{
		ClientToken:   h.clientToken,
		ClientToken:   clientToken,
		ClientAddress: h.clientAddress,
		StartTime:     h.startTime,
	})
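Both stopHealSequence and LaunchNewHealSequence above now suffix the client token with the local proxy-endpoint index ("<uuid>@<index>") when globalIsDistErasure is set, so a later status or stop call can be routed back to the node that owns the heal sequence. A hedged sketch of splitting such a token apart on receipt; parseHealToken is an illustrative helper and not part of this diff:

import (
	"strconv"
	"strings"
)

func parseHealToken(clientToken string) (token string, nodeIndex int, ok bool) {
	parts := strings.SplitN(clientToken, "@", 2)
	if len(parts) != 2 {
		// Plain UUID token: non-distributed setup, nothing to route.
		return clientToken, -1, false
	}
	idx, err := strconv.Atoi(parts[1])
	if err != nil {
		return clientToken, -1, false
	}
	return parts[0], idx, true
}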
@@ -261,11 +258,11 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
// status results from global state and returns its JSON
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(path string,
func (ahs *allHealState) PopHealStatusJSON(hpath string,
	clientToken string) ([]byte, APIErrorCode) {

	// fetch heal state for given path
	h, exists := ahs.getHealSequence(path)
	h, exists := ahs.getHealSequence(hpath)
	if !exists {
		// If there is no such heal sequence, return error.
		return nil, ErrHealNoSuchProcess
@@ -277,8 +274,8 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
	}

	// Take lock to access and update the heal-sequence
	h.currentStatus.updateLock.Lock()
	defer h.currentStatus.updateLock.Unlock()
	h.mutex.Lock()
	defer h.mutex.Unlock()

	numItems := len(h.currentStatus.Items)

@@ -289,38 +286,42 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
		lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
	}

	// After sending status to client, and before relinquishing
	// the updateLock, reset Item to nil and record the result
	// index sent to the client.
	defer func(i int64) {
		h.lastSentResultIndex = i
		h.currentStatus.Items = nil
	}(lastResultIndex)
	h.lastSentResultIndex = lastResultIndex

	jbytes, err := json.Marshal(h.currentStatus)
	if err != nil {
		h.currentStatus.Items = nil

		logger.LogIf(h.ctx, err)
		return nil, ErrInternalError
	}

	h.currentStatus.Items = nil

	return jbytes, ErrNone
}

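PopHealStatusJSON is deliberately destructive: each successful call hands the accumulated Items to exactly one caller and resets them under h.mutex. A consumer therefore polls until the summary turns terminal. The loop below is an illustrative sketch using only names visible in this diff (it would live inside package cmd); the one-second poll interval is an arbitrary choice:

func drainHealStatus(ahs *allHealState, hpath, clientToken string) error {
	for {
		b, apiErr := ahs.PopHealStatusJSON(hpath, clientToken)
		if apiErr != ErrNone {
			return fmt.Errorf("heal status failed: %v", apiErr)
		}
		var status healSequenceStatus
		if err := json.Unmarshal(b, &status); err != nil {
			return err
		}
		// Anything in status.Items is observed exactly once, since the
		// server clears Items after every successful marshal.
		if status.Summary == healFinishedStatus || status.Summary == healStoppedStatus {
			return nil
		}
		time.Sleep(time.Second)
	}
}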
// healSource denotes single entity and heal option.
type healSource struct {
	bucket    string
	object    string
	versionID string
	opts      *madmin.HealOpts // optional heal option overrides default setting
}

// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
	// bucket, and prefix on which heal seq. was initiated
	bucket, objPrefix string
	// bucket, and object on which heal seq. was initiated
	bucket, object string

	// path is just pathJoin(bucket, objPrefix)
	path string
	// A channel of entities (format, buckets, objects) to heal
	sourceCh chan healSource

	// List of entities (format, buckets, objects) to heal
	sourceCh chan string
	// A channel of entities with heal result
	respCh chan healResult

	// Report healing progress, false if this is a background
	// healing since currently there is no entity which will
	// receive realtime healing status
	// Report healing progress
	reportProgress bool

	// time at which heal sequence was started
@@ -345,59 +346,141 @@ type healSequence struct {
	// completed
	traverseAndHealDoneCh chan error

	// channel to signal heal sequence to stop (e.g. from the
	// heal-stop API)
	stopSignalCh chan struct{}
	// canceler to cancel heal sequence.
	cancelCtx context.CancelFunc

	// the last result index sent to client
	lastSentResultIndex int64

	// Number of total items scanned
	scannedItemsCount int64
	// Number of total items scanned against item type
	scannedItemsMap map[madmin.HealItemType]int64

	// Number of total items healed against item type
	healedItemsMap map[madmin.HealItemType]int64

	// Number of total items where healing failed against endpoint and drive state
	healFailedItemsMap map[string]int64

	// The time of the last scan/heal activity
	lastHealActivity time.Time

	// Holds the request-info for logging
	ctx context.Context

	// used to lock this structure as it is concurrently accessed
	mutex sync.RWMutex
}

// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(bucket, objPrefix, clientAddr string,
	numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
	hs madmin.HealOpts, forceStart bool) *healSequence {

	reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
	reqInfo.AppendTags("prefix", objPrefix)
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))

	clientToken := mustGetUUID()

	return &healSequence{
		respCh:         make(chan healResult),
		bucket:         bucket,
		objPrefix:      objPrefix,
		path:           pathJoin(bucket, objPrefix),
		object:         objPrefix,
		reportProgress: true,
		startTime:      UTCNow(),
		clientToken:    mustGetUUID(),
		clientToken:    clientToken,
		clientAddress:  clientAddr,
		forceStarted:   forceStart,
		settings:       hs,
		currentStatus: healSequenceStatus{
			Summary:      healNotStartedStatus,
			HealSettings: hs,
			NumDisks:     numDisks,
			updateLock:   &sync.RWMutex{},
		},
		traverseAndHealDoneCh: make(chan error),
		stopSignalCh:          make(chan struct{}),
		cancelCtx:             cancel,
		ctx:                   ctx,
		scannedItemsMap:       make(map[madmin.HealItemType]int64),
		healedItemsMap:        make(map[madmin.HealItemType]int64),
		healFailedItemsMap:    make(map[string]int64),
	}
}

// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in case of Background healing scenario, where
// we use a single long running healSequence which reactively heals
// objects passed to the SourceCh.
func (h *healSequence) resetHealStatusCounters() {
	h.mutex.Lock()
	defer h.mutex.Unlock()

	h.currentStatus.Items = []madmin.HealResultItem{}
	h.lastSentResultIndex = 0
	h.scannedItemsMap = make(map[madmin.HealItemType]int64)
	h.healedItemsMap = make(map[madmin.HealItemType]int64)
	h.healFailedItemsMap = make(map[string]int64)
}

// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
	var count int64
	h.mutex.RLock()
	defer h.mutex.RUnlock()

	for _, v := range h.scannedItemsMap {
		count = count + v
	}
	return count
}

// getScannedItemsMap - returns map of all scanned items against type
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
	h.mutex.RLock()
	defer h.mutex.RUnlock()

	// Make a copy before returning the value
	retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
	for k, v := range h.scannedItemsMap {
		retMap[k] = v
	}

	return retMap
}

// getHealedItemsMap - returns the map of all healed items against type
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
	h.mutex.RLock()
	defer h.mutex.RUnlock()

	// Make a copy before returning the value
	retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
	for k, v := range h.healedItemsMap {
		retMap[k] = v
	}

	return retMap
}

// gethealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
	h.mutex.RLock()
	defer h.mutex.RUnlock()

	// Make a copy before returning the value
	retMap := make(map[string]int64, len(h.healFailedItemsMap))
	for k, v := range h.healFailedItemsMap {
		retMap[k] = v
	}

	return retMap
}

// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
	select {
	case <-h.stopSignalCh:
	case <-h.ctx.Done():
		return true
	default:
		return false
@@ -406,19 +489,15 @@ func (h *healSequence) isQuitting() bool {

// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
	h.currentStatus.updateLock.RLock()
	summary := h.currentStatus.Summary
	h.currentStatus.updateLock.RUnlock()
	return summary == healStoppedStatus || summary == healFinishedStatus
	h.mutex.RLock()
	ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
	h.mutex.RUnlock()
	return ended
}

// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
	select {
	case <-h.stopSignalCh:
	default:
		close(h.stopSignalCh)
	}
	h.cancelCtx()
}

// pushHealResultItem - pushes a heal result item for consumption in
@@ -445,29 +524,27 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {

	var itemsLen int
	for {
		h.currentStatus.updateLock.Lock()
		h.mutex.Lock()
		itemsLen = len(h.currentStatus.Items)
		if itemsLen == maxUnconsumedHealResultItems {
			// unlock and wait to check again if we can push
			h.currentStatus.updateLock.Unlock()

			// wait for a second, or quit if an external
			// stop signal is received or the
			// unconsumedTimer fires.
			select {
			// Check after a second
			case <-time.After(time.Second):
				h.mutex.Unlock()
				continue

			case <-h.stopSignalCh:
			case <-h.ctx.Done():
				h.mutex.Unlock()
				// discard result and return.
				return errHealPushStopNDiscard
				return errHealStopSignalled

			// Timeout if no results consumed for too
			// long.
			// Timeout if no results consumed for too long.
			case <-unconsumedTimer.C:
				h.mutex.Unlock()
				return errHealIdleTimeout

			}
		}
		break
@@ -484,13 +561,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
	h.currentStatus.Items = append(h.currentStatus.Items, r)

	// release lock
	h.currentStatus.updateLock.Unlock()

	// This is a "safe" point for the heal sequence to quit if
	// signaled externally.
	if h.isQuitting() {
		return errHealStopSignalled
	}
	h.mutex.Unlock()

	return nil
}
@@ -504,10 +575,10 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
	// Set status as running
	h.currentStatus.updateLock.Lock()
	h.mutex.Lock()
	h.currentStatus.Summary = healRunningStatus
	h.currentStatus.StartTime = UTCNow()
	h.currentStatus.updateLock.Unlock()
	h.mutex.Unlock()

	if h.sourceCh == nil {
		go h.traverseAndHeal()
@@ -517,25 +588,27 @@

	select {
	case err, ok := <-h.traverseAndHealDoneCh:
		if !ok {
			return
		}
		h.mutex.Lock()
		h.endTime = UTCNow()
		h.currentStatus.updateLock.Lock()
		defer h.currentStatus.updateLock.Unlock()
		// Heal traversal is complete.
		if ok {
			if err == nil {
				// heal traversal succeeded.
				h.currentStatus.Summary = healFinishedStatus
			} else {
				// heal traversal had an error.
				h.currentStatus.Summary = healStoppedStatus
				h.currentStatus.FailureDetail = err.Error()
		} else {
			// heal traversal succeeded.
			h.currentStatus.Summary = healFinishedStatus
		}

	case <-h.stopSignalCh:
		h.mutex.Unlock()
	case <-h.ctx.Done():
		h.mutex.Lock()
		h.endTime = UTCNow()
		h.currentStatus.updateLock.Lock()
		h.currentStatus.Summary = healStoppedStatus
		h.currentStatus.FailureDetail = errHealStopSignalled.Error()
		h.currentStatus.updateLock.Unlock()
		h.mutex.Unlock()

		// drain traverse channel so the traversal
		// go-routine does not leak.
@@ -548,62 +621,95 @@
	}
}

func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
	var respCh = make(chan healResult)
	defer close(respCh)
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
	// Send heal request
	globalBackgroundHealRoutine.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
	// Wait for answer and push result to the client
	res := <-respCh
	if !h.reportProgress {
		return nil
	task := healTask{
		bucket:     source.bucket,
		object:     source.object,
		versionID:  source.versionID,
		opts:       h.settings,
		responseCh: h.respCh,
	}
	res.result.Type = healType
	if res.err != nil {
		// Object might have been deleted, by the time heal
		// was attempted, we should ignore this object and return success.
		if isErrObjectNotFound(res.err) {
			return nil
		}
		// Only report object error
		if healType != madmin.HealItemObject {
	if source.opts != nil {
		task.opts = *source.opts
	}
	globalBackgroundHealRoutine.queueHealTask(task)

	select {
	case res := <-h.respCh:
		if !h.reportProgress {
			// Object might have been deleted, by the time heal
			// was attempted, we should ignore this object and
			// return success.
			if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
				return nil
			}

			h.mutex.Lock()
			defer h.mutex.Unlock()

			// Progress is not reported in case of background heal processing.
			// Instead we increment relevant counter based on the heal result
			// for prometheus reporting.
			if res.err != nil {
				for _, d := range res.result.After.Drives {
					// For failed items we report the endpoint and drive state
					// This will help users take corrective actions for drives
					h.healFailedItemsMap[d.Endpoint+","+d.State]++
				}
			} else {
				// Only object type reported for successful healing
				h.healedItemsMap[res.result.Type]++
			}

			// Report caller of any failure
			return res.err
		}
		res.result.Detail = res.err.Error()
		res.result.Type = healType
		if res.err != nil {
			// Object might have been deleted, by the time heal
			// was attempted, we should ignore this object and return success.
			if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
				return nil
			}
			// Only report object error
			if healType != madmin.HealItemObject {
				return res.err
			}
			res.result.Detail = res.err.Error()
		}
		return h.pushHealResultItem(res.result)
	case <-h.ctx.Done():
		return nil
	}
	return h.pushHealResultItem(res.result)
}

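Because queueHealTask now takes a healSource rather than a joined path string, the heal pipeline can address one specific object version and override the sequence-wide options per item. An illustrative call from inside a heal sequence; the bucket, object, and version values are made up, and madmin.HealDeepScan is assumed to be this tree's deep-scan mode constant:

	// Deep-scan a single versioned object, overriding h.settings for
	// just this item via healSource.opts.
	deepScan := madmin.HealOpts{ScanMode: madmin.HealDeepScan}
	src := healSource{
		bucket:    "photos",
		object:    "2020/04/cat.jpg",
		versionID: "00000000-0000-0000-0000-000000000000",
		opts:      &deepScan,
	}
	if err := h.queueHealTask(src, madmin.HealItemObject); err != nil {
		logger.LogIf(h.ctx, err)
	}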
func (h *healSequence) healItemsFromSourceCh() error {
	bucketsOnly := true // heal buckets only, not objects.
	if err := h.healItems(bucketsOnly); err != nil {
		logger.LogIf(h.ctx, err)
	}

	for {
		select {
		case path := <-h.sourceCh:
		case source, ok := <-h.sourceCh:
			if !ok {
				return nil
			}
			var itemType madmin.HealItemType
			switch {
			case path == nopHeal:
			case source.bucket == nopHeal:
				continue
			case path == SlashSeparator:
			case source.bucket == SlashSeparator:
				itemType = madmin.HealItemMetadata
			case !strings.Contains(path, SlashSeparator):
			case source.bucket != "" && source.object == "":
				itemType = madmin.HealItemBucket
			default:
				itemType = madmin.HealItemObject
			}

			if err := h.queueHealTask(path, itemType); err != nil {
			if err := h.queueHealTask(source, itemType); err != nil {
				logger.LogIf(h.ctx, err)
			}

			h.scannedItemsCount++
			h.scannedItemsMap[itemType]++
			h.lastHealActivity = UTCNow()
		case <-h.traverseAndHealDoneCh:
			return nil
		case <-GlobalServiceDoneCh:
		case <-h.ctx.Done():
			return nil
		}
	}
@@ -611,10 +717,9 @@ func (h *healSequence) healItemsFromSourceCh() error {

func (h *healSequence) healFromSourceCh() {
	h.healItemsFromSourceCh()
	close(h.traverseAndHealDoneCh)
}

func (h *healSequence) healItems(bucketsOnly bool) error {
func (h *healSequence) healDiskMeta() error {
	// Start with format healing
	if err := h.healDiskFormat(); err != nil {
		return err
@@ -626,13 +731,12 @@
	}

	// Start healing the bucket config prefix.
	if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
		return err
	}
	return h.healMinioSysMeta(bucketConfigPrefix)()
}

	// Start healing the background ops prefix.
	if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
		logger.LogIf(h.ctx, err)
func (h *healSequence) healItems(bucketsOnly bool) error {
	if err := h.healDiskMeta(); err != nil {
		return err
	}

	// Heal buckets and objects
@@ -648,13 +752,7 @@
// two objects.
func (h *healSequence) traverseAndHeal() {
	bucketsOnly := false // Heals buckets and objects also.
	if err := h.healItems(bucketsOnly); err != nil {
		if h.isQuitting() {
			err = errHealStopSignalled
		}
		h.traverseAndHealDoneCh <- err
	}

	h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
	close(h.traverseAndHealDoneCh)
}

@@ -671,15 +769,19 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
		// NOTE: Healing on meta is run regardless
		// of any bucket being selected, this is to ensure that
		// meta are always up to date and correct.
		return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, func(bucket string, object string) error {
		return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
			if h.isQuitting() {
				return errHealStopSignalled
			}

			herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
			herr := h.queueHealTask(healSource{
				bucket:    bucket,
				object:    object,
				versionID: versionID,
			}, madmin.HealItemBucketMetadata)
			// Object might have been deleted, by the time heal
			// was attempted, we ignore this object and move on.
			if isErrObjectNotFound(herr) {
			if isErrObjectNotFound(herr) || isErrVersionNotFound(herr) {
				return nil
			}
			return herr
@@ -700,7 +802,7 @@ func (h *healSequence) healDiskFormat() error {
		return errServerNotInitialized
	}

	return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
	return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
@@ -742,7 +844,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
		return errServerNotInitialized
	}

	if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
	if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
		return err
	}

@@ -751,12 +853,12 @@
	}

	if !h.settings.Recursive {
		if h.objPrefix != "" {
		if h.object != "" {
			// Check if an object named as the objPrefix exists,
			// and if so heal it.
			_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
			_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
			if err == nil {
				if err = h.healObject(bucket, h.objPrefix); err != nil {
				if err = h.healObject(bucket, h.object, ""); err != nil {
					return err
				}
			}
@@ -765,14 +867,14 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
		return nil
	}

	if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
	if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
		return errFnHealFromAPIErr(h.ctx, err)
	}
	return nil
}

// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object string) error {
func (h *healSequence) healObject(bucket, object, versionID string) error {
	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil {
@@ -783,5 +885,9 @@ func (h *healSequence) healObject(bucket, object string) error {
		return errHealStopSignalled
	}

	return h.queueHealTask(pathJoin(bucket, object), madmin.HealItemObject)
	return h.queueHealTask(healSource{
		bucket:    bucket,
		object:    object,
		versionID: versionID,
	}, madmin.HealItemObject)
}

cmd/admin-quota-handlers.go (new file, 118 lines)
@@ -0,0 +1,118 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/env"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
)

const (
	bucketQuotaConfigFile = "quota.json"
)

// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketQuotaConfig")

	defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
	if objectAPI == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	// Turn off quota commands if data usage info is unavailable.
	if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
		return
	}

	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	if _, err = parseBucketQuota(bucket, data); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write success response.
	writeSuccessResponseHeadersOnly(w)
}

// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketQuotaConfig")

	defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
	if objectAPI == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	configData, err := json.Marshal(config)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Write success response.
	writeSuccessResponseJSON(w, configData)
}
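PutBucketQuotaConfigHandler above only requires that the body survive parseBucketQuota before the raw bytes are persisted as quota.json through the bucket metadata subsystem. A hedged sketch of driving the endpoint (registered below as set-bucket-quota) from Go; the JSON field names "quota" and "quotatype" follow the madmin.BucketQuota shape assumed for this release, and the admin-signature headers a real client such as pkg/madmin would add are omitted:

	// Set a 1 GiB hard quota on bucket "photos".
	body := []byte(`{"quota": 1073741824, "quotatype": "hard"}`)
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:9000/minio/admin/v3/set-bucket-quota?bucket=photos",
		bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	// NOTE: the request must still be signed with admin credentials
	// (AWS Signature V4) before sending.
	resp, err := http.DefaultClient.Do(req)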
@@ -20,13 +20,17 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
const (
|
||||
adminPathPrefix = minioReservedBucketPath + "/admin"
|
||||
adminAPIVersion = madmin.AdminAPIVersion
|
||||
adminAPIVersionPrefix = SlashSeparator + madmin.AdminAPIVersion
|
||||
adminPathPrefix = minioReservedBucketPath + "/admin"
|
||||
adminAPIVersionV2 = madmin.AdminAPIVersionV2
|
||||
adminAPIVersion = madmin.AdminAPIVersion
|
||||
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
|
||||
adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
|
||||
)
|
||||
|
||||
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
|
||||
@@ -41,127 +45,178 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
|
||||
|
||||
/// Service operations
|
||||
|
||||
// Restart and stop MinIO service.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
|
||||
// Update MinIO servers.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
|
||||
|
||||
// Info operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
|
||||
// Harware Info operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")
|
||||
|
||||
// StorageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
|
||||
// DataUsageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
|
||||
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/accountingusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountingUsageInfoHandler))
|
||||
|
||||
if globalIsDistXL || globalIsXL {
|
||||
/// Heal operations
|
||||
|
||||
// Heal processing endpoint.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
|
||||
|
||||
/// Health operations
|
||||
|
||||
}
|
||||
// Performance command - return performance details based on input type
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
|
||||
|
||||
// Profiling operations
|
||||
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
|
||||
Queries("profilerType", "{profilerType:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
|
||||
|
||||
// Config KV operations.
|
||||
if enableConfigOps {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
|
||||
adminVersions := []string{
|
||||
adminAPIVersionPrefix,
|
||||
adminAPIVersionV2Prefix,
|
||||
}
|
||||
|
||||
/// Config operations
|
||||
if enableConfigOps {
|
||||
// Get config
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
|
||||
// Set config
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
|
||||
for _, adminVersion := range adminVersions {
|
||||
// Restart and stop MinIO service.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
|
||||
// Update MinIO servers.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
|
||||
|
||||
// Info operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
|
||||
|
||||
// StorageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
|
||||
// DataUsageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
|
||||
|
||||
if globalIsDistErasure || globalIsErasure {
|
||||
/// Heal operations
|
||||
|
||||
// Heal processing endpoint.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
|
||||
|
||||
/// Health operations
|
||||
|
||||
}
|
||||
|
||||
// Profiling operations
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
|
||||
Queries("profilerType", "{profilerType:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
|
||||
|
||||
// Config KV operations.
|
||||
if enableConfigOps {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
|
||||
}
|
||||
|
||||
// Enable config help in all modes.
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
|
||||
|
||||
// Config KV history operations.
|
||||
if enableConfigOps {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
|
||||
}
|
||||
|
||||
/// Config import/export bulk operations
|
||||
if enableConfigOps {
|
||||
// Get config
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
|
||||
// Set config
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
|
||||
}
|
||||
|
||||
if enableIAMOps {
|
||||
// -- IAM APIs --
|
||||
|
||||
// Add policy IAM
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Add user IAM
|
||||
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
|
||||
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// Service accounts ops
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
if adminVersion == adminAPIVersionV2Prefix {
|
||||
// Info policy IAM v2
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")
|
||||
|
||||
// List policies v2
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
|
||||
} else {
|
||||
// Info policy IAM latest
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// List policies latest
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// Remove policy IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Set user or group policy
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
|
||||
// Remove user IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// User info
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Add/Remove members from group
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
|
||||
// Get Group
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
|
||||
// List Groups
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
|
||||
// Set Group Status
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
}
|
||||
|
||||
// Quota operations
|
||||
if globalIsDistErasure || globalIsErasure {
|
||||
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
|
||||
// GetBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// PutBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
}
|
||||
}
|
||||
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistErasure {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}

// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)

// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))

// -- KMS APIs --
//
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(httpTraceAll(adminAPI.KMSCreateKeyHandler)).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))

if !globalIsGateway {
    // -- OBD API --
    adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
        HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
        Queries("perfdrive", "{perfdrive:true|false}",
            "perfnet", "{perfnet:true|false}",
            "minioinfo", "{minioinfo:true|false}",
            "minioconfig", "{minioconfig:true|false}",
            "syscpu", "{syscpu:true|false}",
            "sysdiskhw", "{sysdiskhw:true|false}",
            "sysosinfo", "{sysosinfo:true|false}",
            "sysmem", "{sysmem:true|false}",
            "sysprocess", "{sysprocess:true|false}",
        )
}
}

if enableIAMOps {
    // -- IAM APIs --

    // Add policy IAM
    adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
        "{name:.*}")

    // Add user IAM
    adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
    adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
        Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")

    // Info policy IAM
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")

    // Remove policy IAM
    adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")

    // Set user or group policy
    adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-or-group-policy").
        HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
        Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")

    // Remove user IAM
    adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")

    // List users
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))

    // User info
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")

    // Add/Remove members from group
    adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))

    // Get Group
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")

    // List Groups
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))

    // Set Group Status
    adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")

    // List policies
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}

// -- Top APIs --
// Top locks
if globalIsDistXL {
    adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}

// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/trace").HandlerFunc(adminAPI.TraceHandler)

// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))

// -- KMS APIs --
//
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))

// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
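Each admin endpoint above pairs a gorilla/mux method matcher with a path and, where needed, a query matcher that also captures the value as a route variable. A minimal standalone sketch of that registration pattern, assuming only gorilla/mux (the prefix and handler here are hypothetical, not MinIO's):

package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/gorilla/mux"
)

func main() {
    const adminPrefix = "/minio/admin/v3" // hypothetical prefix for this sketch
    r := mux.NewRouter()

    // Matches POST {prefix}/kms/key/create?key-id=<anything>; the Queries
    // matcher both constrains routing and captures key-id as a route variable.
    r.Methods(http.MethodPost).
        Path(adminPrefix + "/kms/key/create").
        Queries("key-id", "{key-id:.*}").
        HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            fmt.Fprintf(w, "create key %q\n", mux.Vars(req)["key-id"])
        })

    // Requests that match no registered route fall through here,
    // mirroring the NotFoundHandler wiring above.
    r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        http.Error(w, "no such admin API", http.StatusNotFound)
    })

    log.Fatal(http.ListenAndServe(":8080", r))
}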
@@ -17,181 +17,16 @@
package cmd

import (
    "net"
    "net/http"
    "os"

    "github.com/minio/minio-go/v6/pkg/set"
    "github.com/minio/minio/pkg/cpu"
    "github.com/minio/minio/pkg/disk"
    "github.com/minio/minio/pkg/madmin"
    "github.com/minio/minio/pkg/mem"

    cpuhw "github.com/shirou/gopsutil/cpu"
)

// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
    var memUsages []mem.Usage
    var historicUsages []mem.Usage
    seenHosts := set.NewStringSet()
    for _, ep := range endpointZones {
        for _, endpoint := range ep.Endpoints {
            if seenHosts.Contains(endpoint.Host) {
                continue
            }
            seenHosts.Add(endpoint.Host)

            // Only proceed for local endpoints
            if endpoint.IsLocal {
                memUsages = append(memUsages, mem.GetUsage())
                historicUsages = append(historicUsages, mem.GetHistoricUsage())
            }
        }
    }
    addr := r.Host
    if globalIsDistXL {
        addr = GetLocalPeer(endpointZones)
    }
    return ServerMemUsageInfo{
        Addr:          addr,
        Usage:         memUsages,
        HistoricUsage: historicUsages,
    }
}
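The helpers in this file all share one idiom: walk every endpoint, collect metrics at most once per host, and only for local endpoints. A self-contained sketch of that dedup loop using just the standard library (Endpoint here is a stand-in type, not MinIO's):

package main

import "fmt"

type Endpoint struct {
    Host    string
    IsLocal bool
}

func localHostsOnce(endpoints []Endpoint) []string {
    seen := make(map[string]struct{}) // plays the role of set.StringSet
    var hosts []string
    for _, ep := range endpoints {
        if _, ok := seen[ep.Host]; ok {
            continue // host already visited, skip duplicates
        }
        seen[ep.Host] = struct{}{}
        if ep.IsLocal {
            hosts = append(hosts, ep.Host)
        }
    }
    return hosts
}

func main() {
    eps := []Endpoint{{"a:9000", true}, {"a:9000", true}, {"b:9000", false}}
    fmt.Println(localHostsOnce(eps)) // [a:9000]
}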

// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
    var cpuLoads []cpu.Load
    var historicLoads []cpu.Load
    seenHosts := set.NewStringSet()
    for _, ep := range endpointZones {
        for _, endpoint := range ep.Endpoints {
            if seenHosts.Contains(endpoint.Host) {
                continue
            }
            seenHosts.Add(endpoint.Host)

            // Only proceed for local endpoints
            if endpoint.IsLocal {
                cpuLoads = append(cpuLoads, cpu.GetLoad())
                historicLoads = append(historicLoads, cpu.GetHistoricLoad())
            }
        }
    }
    addr := r.Host
    if globalIsDistXL {
        addr = GetLocalPeer(endpointZones)
    }
    return ServerCPULoadInfo{
        Addr:         addr,
        Load:         cpuLoads,
        HistoricLoad: historicLoads,
    }
}

// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
    var dps []disk.Performance
    for _, ep := range endpointZones {
        for _, endpoint := range ep.Endpoints {
            // Only proceed for local endpoints
            if endpoint.IsLocal {
                if _, err := os.Stat(endpoint.Path); err != nil {
                    // Since this drive is not available, add relevant details and proceed
                    dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
                    continue
                }
                dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
                dp.Path = endpoint.Path
                dps = append(dps, dp)
            }
        }
    }
    addr := r.Host
    if globalIsDistXL {
        addr = GetLocalPeer(endpointZones)
    }
    return madmin.ServerDrivesPerfInfo{
        Addr: addr,
        Perf: dps,
    }
}

// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
    var cpuHardwares []cpuhw.InfoStat
    seenHosts := set.NewStringSet()
    for _, ep := range endpointZones {
        for _, endpoint := range ep.Endpoints {
            if seenHosts.Contains(endpoint.Host) {
                continue
            }
            // Add to the list of visited hosts
            seenHosts.Add(endpoint.Host)
            // Only proceed for local endpoints
            if endpoint.IsLocal {
                cpuHardware, err := cpuhw.Info()
                if err != nil {
                    return madmin.ServerCPUHardwareInfo{
                        Error: err.Error(),
                    }
                }
                cpuHardwares = append(cpuHardwares, cpuHardware...)
            }
        }
    }
    addr := r.Host
    if globalIsDistXL {
        addr = GetLocalPeer(endpointZones)
    }

    return madmin.ServerCPUHardwareInfo{
        Addr:    addr,
        CPUInfo: cpuHardwares,
    }
}
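getLocalCPUInfo delegates the hardware probe to the third-party gopsutil package. A minimal standalone sketch of that call, assuming gopsutil's v2 import path as used in the import block above:

package main

import (
    "fmt"
    "log"

    cpuhw "github.com/shirou/gopsutil/cpu"
)

func main() {
    // cpu.Info returns one InfoStat per logical CPU/package.
    infos, err := cpuhw.Info()
    if err != nil {
        log.Fatal(err)
    }
    for _, ci := range infos {
        fmt.Printf("%s @ %.0f MHz, %d cores\n", ci.ModelName, ci.Mhz, ci.Cores)
    }
}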

// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
    var networkHardwares []net.Interface
    seenHosts := set.NewStringSet()
    for _, ep := range endpointZones {
        for _, endpoint := range ep.Endpoints {
            if seenHosts.Contains(endpoint.Host) {
                continue
            }
            // Add to the list of visited hosts
            seenHosts.Add(endpoint.Host)
            // Only proceed for local endpoints
            if endpoint.IsLocal {
                networkHardware, err := net.Interfaces()
                if err != nil {
                    return madmin.ServerNetworkHardwareInfo{
                        Error: err.Error(),
                    }
                }
                networkHardwares = append(networkHardwares, networkHardware...)
            }
        }
    }
    addr := r.Host
    if globalIsDistXL {
        addr = GetLocalPeer(endpointZones)
    }

    return madmin.ServerNetworkHardwareInfo{
        Addr:        addr,
        NetworkInfo: networkHardwares,
    }
}

// getLocalServerProperty - returns ServerDrivesPerfInfo for only the
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
    var disks []madmin.Disk
    addr := r.Host
    if globalIsDistXL {
    if globalIsDistErasure {
        addr = GetLocalPeer(endpointZones)
    }
    network := make(map[string]string)
@@ -204,33 +39,14 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
    if endpoint.IsLocal {
        // Only proceed for local endpoints
        network[nodeName] = "online"
        var di = madmin.Disk{
            DrivePath: endpoint.Path,
        }
        diInfo, err := disk.GetInfo(endpoint.Path)
        if err != nil {
            if os.IsNotExist(err) || isSysErrPathNotFound(err) {
                di.State = madmin.DriveStateMissing
            } else {
                di.State = madmin.DriveStateCorrupt
            }
            continue
        }
        _, present := network[nodeName]
        if !present {
            if err := IsServerResolvable(endpoint); err == nil {
                network[nodeName] = "online"
            } else {
            di.State = madmin.DriveStateOk
            di.DrivePath = endpoint.Path
            di.TotalSpace = diInfo.Total
            di.UsedSpace = diInfo.Total - diInfo.Free
            di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
        }
        disks = append(disks, di)
    } else {
        _, present := network[nodeName]
        if !present {
            err := IsServerResolvable(endpoint)
            if err == nil {
                network[nodeName] = "online"
            } else {
                network[nodeName] = "offline"
            }
            network[nodeName] = "offline"
        }
    }
}
@@ -243,6 +59,5 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
        Version:  Version,
        CommitID: CommitID,
        Network:  network,
        Disks:    disks,
    }
}
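The network probe above is just the standard library. A tiny runnable sketch of the call getLocalNetworkInfo aggregates per local endpoint:

package main

import (
    "fmt"
    "log"
    "net"
)

func main() {
    // net.Interfaces lists the machine's network interfaces.
    ifaces, err := net.Interfaces()
    if err != nil {
        log.Fatal(err)
    }
    for _, ifc := range ifaces {
        fmt.Printf("%s (MTU %d, flags %v)\n", ifc.Name, ifc.MTU, ifc.Flags)
    }
}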

@@ -20,9 +20,18 @@ import (
    "encoding/xml"
)

// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
// DeletedObject objects deleted
type DeletedObject struct {
    DeleteMarker          bool   `xml:"DeleteMarker,omitempty"`
    DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
    ObjectName            string `xml:"Key,omitempty"`
    VersionID             string `xml:"VersionId,omitempty"`
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
    ObjectName string `xml:"Key"`
    VersionID  string `xml:"VersionId"`
}

// createBucketConfiguration container for bucket configuration request from client.
@@ -37,5 +46,5 @@ type DeleteObjectsRequest struct {
    // Element to enable quiet mode for the request
    Quiet bool
    // List of objects to be deleted
    Objects []ObjectIdentifier `xml:"Object"`
    Objects []ObjectToDelete `xml:"Object"`
}
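The new DeletedObject type leans on encoding/xml struct tags so that delete-marker fields appear in the response only when set. A standalone sketch of that behavior (the type below mirrors, but is not, MinIO's):

package main

import (
    "encoding/xml"
    "fmt"
)

type deletedObject struct {
    DeleteMarker          bool   `xml:"DeleteMarker,omitempty"`
    DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
    ObjectName            string `xml:"Key,omitempty"`
    VersionID             string `xml:"VersionId,omitempty"`
}

func main() {
    // Zero-valued fields are omitted, so a plain delete emits only <Key>.
    plain, _ := xml.Marshal(deletedObject{ObjectName: "photo.png"})
    fmt.Println(string(plain))

    // A delete marker additionally carries its marker fields.
    marker, _ := xml.Marshal(deletedObject{DeleteMarker: true, DeleteMarkerVersionID: "v1", ObjectName: "photo.png"})
    fmt.Println(string(marker))
}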

@@ -25,18 +25,18 @@ import (
    "strings"

    "github.com/Azure/azure-storage-blob-go/azblob"
    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "google.golang.org/api/googleapi"

    minio "github.com/minio/minio-go/v6"
    minio "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/tags"
    "github.com/minio/minio/cmd/config/etcd/dns"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/auth"
    "github.com/minio/minio/pkg/bucket/lifecycle"
    objectlock "github.com/minio/minio/pkg/bucket/object/lock"
    "github.com/minio/minio/pkg/bucket/object/tagging"
    "github.com/minio/minio/pkg/bucket/policy"
    "github.com/minio/minio/pkg/bucket/versioning"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/hash"
)
@@ -101,6 +101,9 @@ const (
    ErrNoSuchBucketLifecycle
    ErrNoSuchLifecycleConfiguration
    ErrNoSuchBucketSSEConfig
    ErrNoSuchCORSConfiguration
    ErrNoSuchWebsiteConfiguration
    ErrReplicationConfigurationNotFoundError
    ErrNoSuchKey
    ErrNoSuchUpload
    ErrNoSuchVersion
@@ -122,7 +125,8 @@ const (
    ErrMissingCredTag
    ErrCredMalformed
    ErrInvalidRegion
    ErrInvalidService
    ErrInvalidServiceS3
    ErrInvalidServiceSTS
    ErrInvalidRequestVersion
    ErrMissingSignTag
    ErrMissingSignHeadersTag
@@ -150,12 +154,14 @@ const (
    ErrBadRequest
    ErrKeyTooLongError
    ErrInvalidBucketObjectLockConfiguration
    ErrObjectLockConfigurationNotFound
    ErrObjectLockConfigurationNotAllowed
    ErrNoSuchObjectLockConfiguration
    ErrObjectLocked
    ErrInvalidRetentionDate
    ErrPastObjectLockRetainDate
    ErrUnknownWORMModeDirective
    ErrBucketTaggingNotFound
    ErrObjectLockInvalidHeaders
    ErrInvalidTagDirective
    // Add new error codes here.
@@ -210,6 +216,7 @@ const (
    ErrInvalidResourceName
    ErrServerNotInitialized
    ErrOperationTimedOut
    ErrOperationMaxedOut
    ErrInvalidRequest
    // MinIO storage class error codes
    ErrInvalidStorageClass
@@ -233,6 +240,10 @@ const (
    ErrAdminCredentialsMismatch
    ErrInsecureClientRequest
    ErrObjectTampered
    // Bucket Quota error codes
    ErrAdminBucketQuotaExceeded
    ErrAdminNoSuchQuotaConfiguration
    ErrAdminBucketQuotaDisabled

    ErrHealNotImplemented
    ErrHealNoSuchProcess
@@ -332,19 +343,28 @@ const (
    ErrAdminProfilerNotEnabled
    ErrInvalidDecompressedSize
    ErrAddUserInvalidArgument
    ErrAdminAccountNotEligible
    ErrServiceAccountNotFound
    ErrPostPolicyConditionInvalidFormat
)

type errorCodeMap map[APIErrorCode]APIError

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
    apiErr, ok := e[errCode]
    if !ok {
        return e[ErrInternalError]
        apiErr = e[ErrInternalError]
    }
    if err != nil {
        apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
    }
    return apiErr
}

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
    return e.ToAPIErrWithErr(errCode, nil)
}
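The reworked ToAPIErrWithErr shows a useful Go pattern: look up a value type in a map, fall back to a default entry, then mutate the local copy without ever touching the map. A self-contained sketch under those assumptions (the types are illustrative, not MinIO's):

package main

import "fmt"

type apiError struct {
    Code        string
    Description string
}

var codes = map[int]apiError{
    0: {"InternalError", "We encountered an internal error."},
    1: {"NoSuchKey", "The specified key does not exist."},
}

// toAPIErr returns the mapped error, defaulting to the internal error,
// and optionally appends the underlying cause to the description.
func toAPIErr(code int, cause error) apiError {
    e, ok := codes[code]
    if !ok {
        e = codes[0] // assign instead of early-return so the wrapping below still runs
    }
    if cause != nil {
        // e is a copy; the map entry itself is never modified.
        e.Description = fmt.Sprintf("%s (%s)", e.Description, cause)
    }
    return e
}

func main() {
    fmt.Println(toAPIErr(1, fmt.Errorf("disk offline")))
    fmt.Println(toAPIErr(42, nil)) // unknown code -> internal error
}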

// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodes = errorCodeMap{
@@ -440,7 +460,7 @@ var errorCodes = errorCodeMap{
    },
    ErrInvalidAccessKeyID: {
        Code:           "InvalidAccessKeyId",
        Description:    "The access key ID you provided does not exist in our records.",
        Description:    "The Access Key Id you provided does not exist in our records.",
        HTTPStatusCode: http.StatusForbidden,
    },
    ErrInvalidBucketName: {
@@ -519,9 +539,9 @@ var errorCodes = errorCodeMap{
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrNoSuchVersion: {
        Code:           "NoSuchVersion",
        Description:    "Indicates that the version ID specified in the request does not match an existing version.",
        HTTPStatusCode: http.StatusNotFound,
        Code:           "InvalidArgument",
        Description:    "Invalid version id specified",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrNotImplemented: {
        Code: "NotImplemented",
@@ -652,9 +672,14 @@ var errorCodes = errorCodeMap{
    // FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
    // right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
    // Need changes to make sure variable messages can be constructed.
    ErrInvalidService: {
        Code:           "AuthorizationQueryParametersError",
        Description:    "Error parsing the X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
    ErrInvalidServiceS3: {
        Code:           "AuthorizationParametersError",
        Description:    "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidServiceSTS: {
        Code:           "AuthorizationParametersError",
        Description:    "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
        HTTPStatusCode: http.StatusBadRequest,
    },
    // FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
@@ -757,11 +782,36 @@ var errorCodes = errorCodeMap{
        Description:    "Bucket is missing ObjectLockConfiguration",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrBucketTaggingNotFound: {
        Code:           "NoSuchTagSet",
        Description:    "The TagSet does not exist",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrObjectLockConfigurationNotFound: {
        Code:           "ObjectLockConfigurationNotFoundError",
        Description:    "Object Lock configuration does not exist for this bucket",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrObjectLockConfigurationNotAllowed: {
        Code:           "InvalidBucketState",
        Description:    "Object Lock configuration cannot be enabled on existing buckets.",
        Description:    "Object Lock configuration cannot be enabled on existing buckets",
        HTTPStatusCode: http.StatusConflict,
    },
    ErrNoSuchCORSConfiguration: {
        Code:           "NoSuchCORSConfiguration",
        Description:    "The CORS configuration does not exist",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrNoSuchWebsiteConfiguration: {
        Code:           "NoSuchWebsiteConfiguration",
        Description:    "The specified bucket does not have a website configuration",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrReplicationConfigurationNotFoundError: {
        Code:           "ReplicationConfigurationNotFoundError",
        Description:    "The replication configuration was not found",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrNoSuchObjectLockConfiguration: {
        Code:        "NoSuchObjectLockConfiguration",
        Description: "The specified object does not have a ObjectLock configuration",
@@ -1068,15 +1118,35 @@ var errorCodes = errorCodeMap{
        Description:    "Credentials in config mismatch with server environment variables",
        HTTPStatusCode: http.StatusServiceUnavailable,
    },
    ErrAdminBucketQuotaExceeded: {
        Code:           "XMinioAdminBucketQuotaExceeded",
        Description:    "Bucket quota exceeded",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrAdminNoSuchQuotaConfiguration: {
        Code:           "XMinioAdminNoSuchQuotaConfiguration",
        Description:    "The quota configuration does not exist",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrAdminBucketQuotaDisabled: {
        Code:           "XMinioAdminBucketQuotaDisabled",
        Description:    "Quota specified but disk usage crawl is disabled on MinIO server",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInsecureClientRequest: {
        Code:           "XMinioInsecureClientRequest",
        Description:    "Cannot respond to plain-text request from TLS-encrypted server",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrOperationTimedOut: {
        Code:           "XMinioServerTimedOut",
        Description:    "A timeout occurred while trying to lock a resource",
        HTTPStatusCode: http.StatusRequestTimeout,
        Code:           "RequestTimeout",
        Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
        HTTPStatusCode: http.StatusServiceUnavailable,
    },
    ErrOperationMaxedOut: {
        Code:           "SlowDown",
        Description:    "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
        HTTPStatusCode: http.StatusServiceUnavailable,
    },
    ErrUnsupportedMetadata: {
        Code: "InvalidArgument",
@@ -1573,6 +1643,16 @@ var errorCodes = errorCodeMap{
        Description:    "User is not allowed to be same as admin access key",
        HTTPStatusCode: http.StatusConflict,
    },
    ErrAdminAccountNotEligible: {
        Code:           "XMinioInvalidIAMCredentials",
        Description:    "The administrator key is not eligible for this operation",
        HTTPStatusCode: http.StatusConflict,
    },
    ErrServiceAccountNotFound: {
        Code:           "XMinioInvalidIAMCredentials",
        Description:    "The specified service account is not found",
        HTTPStatusCode: http.StatusNotFound,
    },
    ErrPostPolicyConditionInvalidFormat: {
        Code:        "PostPolicyInvalidKeyName",
        Description: "Invalid according to Policy: Policy Condition failed",
@@ -1588,7 +1668,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
    if err == nil {
        return ErrNone
    }
    // Verify if the underlying error is signature mismatch.

    switch err {
    case errInvalidArgument:
        apiErr = ErrAdminInvalidArgument
@@ -1703,6 +1783,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
        apiErr = ErrBucketAlreadyOwnedByYou
    case ObjectNotFound:
        apiErr = ErrNoSuchKey
    case MethodNotAllowed:
        apiErr = ErrMethodNotAllowed
    case VersionNotFound:
        apiErr = ErrNoSuchVersion
    case ObjectAlreadyExists:
        apiErr = ErrMethodNotAllowed
    case ObjectNameInvalid:
@@ -1747,6 +1831,14 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
        apiErr = ErrNoSuchLifecycleConfiguration
    case BucketSSEConfigNotFound:
        apiErr = ErrNoSuchBucketSSEConfig
    case BucketTaggingNotFound:
        apiErr = ErrBucketTaggingNotFound
    case BucketObjectLockConfigNotFound:
        apiErr = ErrObjectLockConfigurationNotFound
    case BucketQuotaConfigNotFound:
        apiErr = ErrAdminNoSuchQuotaConfiguration
    case BucketQuotaExceeded:
        apiErr = ErrAdminBucketQuotaExceeded
    case *event.ErrInvalidEventName:
        apiErr = ErrEventNotification
    case *event.ErrInvalidARN:
@@ -1817,6 +1909,12 @@ func toAPIError(ctx context.Context, err error) APIError {
        // their internal error types. This code is only
        // useful with gateway implementations.
        switch e := err.(type) {
        case InvalidArgument:
            apiErr = APIError{
                Code:           "InvalidArgument",
                Description:    e.Error(),
                HTTPStatusCode: errorCodes[ErrInvalidRequest].HTTPStatusCode,
            }
        case *xml.SyntaxError:
            apiErr = APIError{
                Code: "MalformedXML",
@@ -1831,13 +1929,19 @@ func toAPIError(ctx context.Context, err error) APIError {
                    e.Error()),
                HTTPStatusCode: http.StatusBadRequest,
            }
        case versioning.Error:
            apiErr = APIError{
                Code:           "IllegalVersioningConfigurationException",
                Description:    fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
                HTTPStatusCode: http.StatusBadRequest,
            }
        case lifecycle.Error:
            apiErr = APIError{
                Code:           "InvalidRequest",
                Description:    e.Error(),
                HTTPStatusCode: http.StatusBadRequest,
            }
        case tagging.Error:
        case tags.Error:
            apiErr = APIError{
                Code:        e.Code(),
                Description: e.Error(),
@@ -1879,12 +1983,6 @@ func toAPIError(ctx context.Context, err error) APIError {
                Description:    e.Error(),
                HTTPStatusCode: e.Response().StatusCode,
            }
        case oss.ServiceError:
            apiErr = APIError{
                Code:           e.Code,
                Description:    e.Message,
                HTTPStatusCode: e.StatusCode,
            }
            // Add more Gateway SDKs here if any in future.
        }
    }
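toAPIError's gateway branch uses a Go type switch to translate SDK-specific error types into one wire-level APIError shape. A compact sketch of the technique with stand-in types (standard library only):

package main

import (
    "encoding/xml"
    "fmt"
    "net/http"
    "os"
)

type apiError struct {
    Code           string
    Description    string
    HTTPStatusCode int
}

// toAPIError maps well-known concrete error types onto an API error,
// falling back to a generic internal error when no case matches.
func toAPIError(err error) apiError {
    out := apiError{"InternalError", err.Error(), http.StatusInternalServerError}
    switch e := err.(type) {
    case *xml.SyntaxError:
        // Malformed request body.
        out = apiError{"MalformedXML", e.Error(), http.StatusBadRequest}
    case *os.PathError:
        // Missing backing file.
        out = apiError{"NoSuchKey", e.Path + " not found", http.StatusNotFound}
    }
    return out
}

func main() {
    var bad error = &xml.SyntaxError{Msg: "unexpected EOF", Line: 3}
    fmt.Printf("%+v\n", toAPIError(bad))
}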

@@ -29,6 +29,7 @@ import (

    "github.com/minio/minio/cmd/crypto"
    xhttp "github.com/minio/minio/cmd/http"
    "github.com/minio/minio/pkg/bucket/lifecycle"
)

// Returns a hexadecimal representation of time at the
@@ -68,6 +69,13 @@ func encodeResponseJSON(response interface{}) []byte {
    return bytesBuffer.Bytes()
}

// Write parts count
func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
    if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
        w.Header()[xhttp.AmzMpPartsCount] = []string{strconv.Itoa(len(objInfo.Parts))}
    }
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
    // set common headers
@@ -82,10 +90,6 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
        w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
    }

    if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
        w.Header().Set(xhttp.AmzMpPartsCount, strconv.Itoa(len(objInfo.Parts)))
    }

    if objInfo.ContentType != "" {
        w.Header().Set(xhttp.ContentType, objInfo.ContentType)
    }
@@ -97,6 +101,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
    if !objInfo.Expires.IsZero() {
        w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
    }

    if globalCacheConfig.Enabled {
        w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
        w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
@@ -105,34 +110,34 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
    // Set tag count if object has tags
    tags, _ := url.ParseQuery(objInfo.UserTags)
    tagCount := len(tags)
    if tagCount != 0 {
        w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
    if tagCount > 0 {
        w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
    }

    // Set all other user defined metadata.
    for k, v := range objInfo.UserDefined {
        if HasPrefix(k, ReservedMetadataPrefix) {
        if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
            // Do not need to send any internal metadata
            // values to client.
            continue
        }
        w.Header().Set(k, v)
        var isSet bool
        for _, userMetadataPrefix := range userMetadataKeyPrefixes {
            if !strings.HasPrefix(k, userMetadataPrefix) {
                continue
            }
            w.Header()[strings.ToLower(k)] = []string{v}
            isSet = true
            break
        }
        if !isSet {
            w.Header().Set(k, v)
        }
    }

    var totalObjectSize int64
    switch {
    case crypto.IsEncrypted(objInfo.UserDefined):
        totalObjectSize, err = objInfo.DecryptedSize()
        if err != nil {
            return err
        }
    case objInfo.IsCompressed():
        totalObjectSize = objInfo.GetActualSize()
        if totalObjectSize < 0 {
            return errInvalidDecompressedSize
        }
    default:
        totalObjectSize = objInfo.Size
    totalObjectSize, err := objInfo.GetActualSize()
    if err != nil {
        return err
    }

    // for providing ranged content
@@ -148,5 +153,26 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
        w.Header().Set(xhttp.ContentRange, contentRange)
    }

    // Set the relevant version ID as part of the response header.
    if objInfo.VersionID != "" {
        w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
    }

    if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
        ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
            Name:         objInfo.Name,
            UserTags:     objInfo.UserTags,
            VersionID:    objInfo.VersionID,
            ModTime:      objInfo.ModTime,
            IsLatest:     objInfo.IsLatest,
            DeleteMarker: objInfo.DeleteMarker,
        })
        if !expiryTime.IsZero() {
            w.Header()[xhttp.AmzExpiration] = []string{
                fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
            }
        }
    }

    return nil
}
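Several of these rewrites replace w.Header().Set(k, v) with a direct map assignment. The difference matters: Set canonicalizes the header key's case, while assigning into the Header map verbatim preserves it, which S3 clients expect for keys such as x-amz-version-id. A small sketch of the contrast (standard library only):

package main

import (
    "fmt"
    "net/http"
)

func main() {
    h := make(http.Header)

    // Set() rewrites the key into canonical MIME form.
    h.Set("x-amz-tag-count", "2")

    // Direct map assignment keeps the key exactly as written.
    h["x-amz-version-id"] = []string{"abc123"}

    for k := range h {
        fmt.Println(k) // prints X-Amz-Tag-Count and x-amz-version-id
    }
}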

@@ -33,8 +33,10 @@ import (
)

const (
    timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
    maxObjectList     = 10000 // Limit number of objects in a listObjectsResponse.
    // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
    iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
    maxObjectList     = 10000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
    maxDeleteList     = 10000 // Limit number of objects deleted in a delete call.
    maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
    maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
)
@@ -233,10 +235,23 @@ type Bucket struct {

// ObjectVersion container for object version metadata
type ObjectVersion struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
    Object
    VersionID string `xml:"VersionId"`
    IsLatest  bool
    VersionID string `xml:"VersionId"`

    isDeleteMarker bool
}

// MarshalXML - marshal ObjectVersion
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
    if o.isDeleteMarker {
        start.Name.Local = "DeleteMarker"
    } else {
        start.Name.Local = "Version"
    }

    type objectVersionWrapper ObjectVersion
    return e.EncodeElement(objectVersionWrapper(o), start)
}
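The MarshalXML override above swaps the element name at encode time, and the local wrapper type exists to avoid infinite recursion: encoding the original type again would re-enter MarshalXML. A standalone sketch of the same trick:

package main

import (
    "encoding/xml"
    "fmt"
    "os"
)

type entry struct {
    Key      string
    isMarker bool // unexported: drives the element name, never serialized
}

func (en entry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
    if en.isMarker {
        start.Name.Local = "DeleteMarker"
    } else {
        start.Name.Local = "Version"
    }
    type wrapper entry // wrapper has no MarshalXML, so this cannot recurse
    return e.EncodeElement(wrapper(en), start)
}

func main() {
    enc := xml.NewEncoder(os.Stdout)
    enc.Encode(entry{Key: "a.txt"})                 // <Version><Key>a.txt</Key></Version>
    enc.Encode(entry{Key: "b.txt", isMarker: true}) // <DeleteMarker>...</DeleteMarker>
    fmt.Println()
}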

// StringMap is a map[string]string.
@@ -331,9 +346,10 @@ type CompleteMultipartUploadResponse struct {

// DeleteError structure.
type DeleteError struct {
    Code    string
    Message string
    Key     string
    Code      string
    Message   string
    Key       string
    VersionID string `xml:"VersionId"`
}

// DeleteObjectsResponse container for multiple object deletes.
@@ -341,7 +357,7 @@ type DeleteObjectsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

    // Collection of all deleted objects
    DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
    DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`

    // Collection of errors deleting certain objects.
    Errors []DeleteError `xml:"Error,omitempty"`
@@ -400,7 +416,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
    for _, bucket := range buckets {
        var listbucket = Bucket{}
        listbucket.Name = bucket.Name
        listbucket.CreationDate = bucket.Created.UTC().Format(timeFormatAMZLong)
        listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
        listbuckets = append(listbuckets, listbucket)
    }

@@ -411,7 +427,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
}

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
    var versions []ObjectVersion
    var prefixes []CommonPrefix
    var owner = Owner{}
@@ -424,20 +440,28 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
            continue
        }
        content.Key = s3EncodeName(object.Name, encodingType)
        content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
        content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
        if object.ETag != "" {
            content.ETag = "\"" + object.ETag + "\""
        }
        content.Size = object.Size
        content.StorageClass = object.StorageClass
        if object.StorageClass != "" {
            content.StorageClass = object.StorageClass
        } else {
            content.StorageClass = globalMinioDefaultStorageClass
        }
        content.Owner = owner
        content.VersionID = "null"
        content.IsLatest = true
        content.VersionID = object.VersionID
        if content.VersionID == "" {
            content.VersionID = nullVersionID
        }
        content.IsLatest = object.IsLatest
        content.isDeleteMarker = object.DeleteMarker
        versions = append(versions, content)
    }

    data.Name = bucket
    data.Versions = versions

    data.EncodingType = encodingType
    data.Prefix = s3EncodeName(prefix, encodingType)
    data.KeyMarker = s3EncodeName(marker, encodingType)
@@ -445,6 +469,8 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
    data.MaxKeys = maxKeys

    data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
    data.NextVersionIDMarker = resp.NextVersionIDMarker
    data.VersionIDMarker = versionIDMarker
    data.IsTruncated = resp.IsTruncated

    for _, prefix := range resp.Prefixes {
@@ -470,12 +496,16 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
            continue
        }
        content.Key = s3EncodeName(object.Name, encodingType)
        content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
        content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
        if object.ETag != "" {
            content.ETag = "\"" + object.ETag + "\""
        }
        content.Size = object.Size
        content.StorageClass = object.StorageClass
        if object.StorageClass != "" {
            content.StorageClass = object.StorageClass
        } else {
            content.StorageClass = globalMinioDefaultStorageClass
        }
        content.Owner = owner
        contents = append(contents, content)
    }
@@ -516,17 +546,21 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
            continue
        }
        content.Key = s3EncodeName(object.Name, encodingType)
        content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
        content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
        if object.ETag != "" {
            content.ETag = "\"" + object.ETag + "\""
        }
        content.Size = object.Size
        content.StorageClass = object.StorageClass
        if object.StorageClass != "" {
            content.StorageClass = object.StorageClass
        } else {
            content.StorageClass = globalMinioDefaultStorageClass
        }
        content.Owner = owner
        if metadata {
            content.UserMetadata = make(StringMap)
            for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
                if HasPrefix(k, ReservedMetadataPrefix) {
                if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
                    // Do not need to send any internal metadata
                    // values to client.
                    continue
@@ -561,7 +595,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
    return CopyObjectResponse{
        ETag:         "\"" + etag + "\"",
        LastModified: lastModified.UTC().Format(timeFormatAMZLong),
        LastModified: lastModified.UTC().Format(iso8601TimeFormat),
    }
}

@@ -569,7 +603,7 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
    return CopyObjectPartResponse{
        ETag:         "\"" + etag + "\"",
        LastModified: lastModified.UTC().Format(timeFormatAMZLong),
        LastModified: lastModified.UTC().Format(iso8601TimeFormat),
    }
}

@@ -588,7 +622,8 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
        Location: location,
        Bucket:   bucket,
        Key:      key,
        ETag:     etag,
        // AWS S3 quotes the ETag in XML, make sure we are compatible here.
        ETag: "\"" + etag + "\"",
    }
}

@@ -613,7 +648,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
        newPart.PartNumber = part.PartNumber
        newPart.ETag = "\"" + part.ETag + "\""
        newPart.Size = part.Size
        newPart.LastModified = part.LastModified.UTC().Format(timeFormatAMZLong)
        newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
        listPartsResponse.Parts[index] = newPart
    }
    return listPartsResponse
@@ -643,23 +678,30 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
        newUpload := Upload{}
        newUpload.UploadID = upload.UploadID
        newUpload.Key = s3EncodeName(upload.Object, encodingType)
        newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
        newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
        listMultipartUploadsResponse.Uploads[index] = newUpload
    }
    return listMultipartUploadsResponse
}
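The recurring change across these generators is a rename of the shared timestamp layout constant to iso8601TimeFormat. In Go, such a layout string is interpreted against the reference time, as this sketch shows (same layout value the responses use; millisecond precision with a literal "Z" suffix):

package main

import (
    "fmt"
    "time"
)

const iso8601TimeFormat = "2006-01-02T15:04:05.000Z"

func main() {
    t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)
    fmt.Println(t.Format(iso8601TimeFormat)) // 2014-04-29T18:30:38.000Z
}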

// generate multi objects delete response.
func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
    deleteResp := DeleteObjectsResponse{}
    if !quiet {
        deleteResp.DeletedObjects = deletedObjects
    }
    if len(errs) == len(deletedObjects) {
        deleteResp.DeletedObjects = nil
    }
    deleteResp.Errors = errs
    return deleteResp
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
    if newObjectLayerFn() == nil {
        // Server still in safe mode.
        w.Header().Set(xhttp.MinIOServerStatus, "safemode")
    }
    setCommonHeaders(w)
    if mType != mimeNone {
        w.Header().Set(xhttp.ContentType, string(mType))
@@ -722,6 +764,10 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
    // The request is from browser and also if browser
    // is enabled we need to redirect.
    if browser && globalBrowserEnabled {
        if newObjectLayerFn() == nil {
            // server still in safe mode.
            w.Header().Set(xhttp.MinIOServerStatus, "safemode")
        }
        w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
        w.WriteHeader(http.StatusTemporaryRedirect)
        return
@@ -753,17 +799,6 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeVersionMismatchResponse - writes custom error responses for version mismatches.
func writeVersionMismatchResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, isJSON bool) {
    if isJSON {
        // Generate error response.
        errorResponse := getAPIErrorResponse(ctx, err, reqURL.String(), w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
        writeResponse(w, err.HTTPStatusCode, encodeResponseJSON(errorResponse), mimeJSON)
    } else {
        writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
    }
}
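writeResponse centralizes the status code, headers, and body for every reply, and the ordering is load-bearing: headers must be populated before the status line is written. A stripped-down sketch of the same shape, standard library only:

package main

import (
    "encoding/json"
    "log"
    "net/http"
)

type errorResponse struct {
    Code    string `json:"Code"`
    Message string `json:"Message"`
}

// writeJSONError mirrors the pattern: set headers first, then status, then body.
func writeJSONError(w http.ResponseWriter, status int, code, msg string) {
    body, _ := json.Marshal(errorResponse{Code: code, Message: msg})
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(status) // headers are frozen after this call
    w.Write(body)
}

func main() {
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        writeJSONError(w, http.StatusNotFound, "NoSuchKey", "The specified key does not exist.")
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}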

// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)

@@ -1,5 +1,5 @@
/*
 * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -21,6 +21,8 @@ import (

    "github.com/gorilla/mux"
    xhttp "github.com/minio/minio/cmd/http"
    "github.com/minio/minio/pkg/wildcard"
    "github.com/rs/cors"
)

func newHTTPServerFn() *xhttp.Server {
@@ -83,149 +85,255 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
    var routers []*mux.Router
    for _, domainName := range globalDomainNames {
        routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
        routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
    }
    routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
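From here on, every handler registration gains a maxClients wrapper. The diff does not show its body, but the usual shape of such a guard is a counting semaphore built from a buffered channel; a hedged sketch under that assumption (illustrative, not MinIO's actual implementation):

package main

import (
    "log"
    "net/http"
)

// maxClients rejects requests beyond a fixed concurrency budget
// instead of letting them queue without bound.
func maxClients(limit int, next http.HandlerFunc) http.HandlerFunc {
    sem := make(chan struct{}, limit) // counting semaphore
    return func(w http.ResponseWriter, r *http.Request) {
        select {
        case sem <- struct{}{}:
            defer func() { <-sem }()
            next(w, r)
        default:
            // Budget exhausted: shed load with 503, as S3's SlowDown does.
            http.Error(w, "too many concurrent requests", http.StatusServiceUnavailable)
        }
    }
}

func main() {
    h := maxClients(100, func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    })
    log.Fatal(http.ListenAndServe(":8080", h))
}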

for _, bucket := range routers {
    // Object operations
    // HeadObject
    bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler)))
    bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
    // CopyObjectPart
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
    // PutObjectPart
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
    // ListObjectParts
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler))).Queries("uploadId", "{uploadId:.*}")
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
    // CompleteMultipartUpload
    bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
    bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
    // NewMultipartUpload
    bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler))).Queries("uploads", "")
    bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
    // AbortMultipartUpload
    bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
    bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
    // GetObjectACL - this is a dummy call.
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler))).Queries("acl", "")
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
    // PutObjectACL - this is a dummy call.
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler))).Queries("acl", "")
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
    // GetObjectTagging
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
    // PutObjectTagging
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler))).Queries("tagging", "")
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
    // DeleteObjectTagging
    bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler))).Queries("tagging", "")
    bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
    // SelectObjectContent
    bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
    bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
    // GetObjectRetention
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler))).Queries("retention", "")
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
    // GetObjectLegalHold
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler))).Queries("legal-hold", "")
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
    // GetObject
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
    bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
    // CopyObject
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
    // PutObjectRetention
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler))).Queries("retention", "")
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
    // PutObjectLegalHold
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler))).Queries("legal-hold", "")
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")

    // PutObject
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
    bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
    // DeleteObject
    bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
    bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
        maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))

    /// Bucket operations
    // GetBucketLocation
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
    // GetBucketPolicy
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
    // GetBucketLifecycle
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
    // GetBucketEncryption
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler))).Queries("encryption", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")

    // Dummy Bucket Calls
    // GetBucketACL -- this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler))).Queries("acl", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
    // PutBucketACL -- this is a dummy call.
    bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler))).Queries("acl", "")
    bucket.Methods(http.MethodPut).HandlerFunc(
        maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
    // GetBucketCors - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler))).Queries("cors", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
    // GetBucketWebsiteHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler))).Queries("website", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
    // GetBucketAccelerateHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler))).Queries("accelerate", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
    // GetBucketRequestPaymentHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler))).Queries("requestPayment", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
    // GetBucketLoggingHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler))).Queries("logging", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
    // GetBucketLifecycleHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
    // GetBucketReplicationHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler))).Queries("replication", "")
    // GetBucketTaggingHandler - this is a dummy call.
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler))).Queries("tagging", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
    // GetBucketTaggingHandler
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
    //DeleteBucketWebsiteHandler
    bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler))).Queries("website", "")
    bucket.Methods(http.MethodDelete).HandlerFunc(
        maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
    // DeleteBucketTaggingHandler
    bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")
    bucket.Methods(http.MethodDelete).HandlerFunc(
        maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")

    // GetBucketObjectLockConfig
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler))).Queries("object-lock", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
    // GetBucketVersioning
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
    // GetBucketNotification
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
    // ListenBucketNotification
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
    // ListMultipartUploads
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
    // ListObjectsV2M
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler))).Queries("list-type", "2", "metadata", "true")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
    // ListObjectsV2
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
    // ListBucketVersions
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
    // ListObjectVersions
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
    // ListObjectsV1 (Legacy)
    bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
    bucket.Methods(http.MethodGet).HandlerFunc(
        maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
    // PutBucketLifecycle
    bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
    bucket.Methods(http.MethodPut).HandlerFunc(
        maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
    // PutBucketEncryption
    bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler))).Queries("encryption", "")
    bucket.Methods(http.MethodPut).HandlerFunc(
        maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")

    // PutBucketPolicy
    bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
    bucket.Methods(http.MethodPut).HandlerFunc(
        maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")

    // PutBucketObjectLockConfig
    bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler))).Queries("object-lock", "")
    bucket.Methods(http.MethodPut).HandlerFunc(
        maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
    // PutBucketTaggingHandler
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
// PutBucketVersioning
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler))).Queries("versioning", "")
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// PutBucketNotification
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
|
||||
// PutBucket
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler)))
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
|
||||
// HeadBucket
|
||||
bucket.Methods(http.MethodHead).HandlerFunc(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler)))
|
||||
bucket.Methods(http.MethodHead).HandlerFunc(
|
||||
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
|
||||
// PostPolicy
|
||||
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler)))
|
||||
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
|
||||
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
|
||||
// DeleteMultipleObjects
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
// DeleteBucketPolicy
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
// DeleteBucketLifecycle
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// DeleteBucketEncryption
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler))).Queries("encryption", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
}
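Each handler above is wrapped in maxClients before the stats and tracing layers. The limiter itself is not part of this diff; the sketch below shows one plausible shape for such a middleware, using a buffered channel as a counting semaphore (the limit and the 503 response are assumptions, not necessarily what MinIO does):

// Hypothetical sketch of a maxClients-style limiter; illustrative only.
func maxClientsSketch(limit int, f http.HandlerFunc) http.HandlerFunc {
	sem := make(chan struct{}, limit) // counting semaphore
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}: // acquire a slot
			defer func() { <-sem }() // release on return
			f(w, r)
		default: // no slots free: shed load instead of queueing
			w.WriteHeader(http.StatusServiceUnavailable)
		}
	}
}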

/// Root operation

// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler)))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))

// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))

// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))

}

// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
commonS3Headers := []string{
xhttp.Date,
xhttp.ETag,
xhttp.ServerInfo,
xhttp.Connection,
xhttp.AcceptRanges,
xhttp.ContentRange,
xhttp.ContentEncoding,
xhttp.ContentLength,
xhttp.ContentType,
"X-Amz*",
"x-amz*",
"*",
}

return cors.New(cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
return true
}
}
return false
},
AllowedMethods: []string{
http.MethodGet,
http.MethodPut,
http.MethodHead,
http.MethodPost,
http.MethodDelete,
http.MethodOptions,
http.MethodPatch,
},
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
}).Handler(handler)
}
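A corsHandler usage sketch; the server wiring here is illustrative and not taken from this diff:

// Sketch: wrap the assembled router so every S3 route honors CORS.
router := mux.NewRouter()
// ... register API routes on router as above ...
srv := &http.Server{Addr: ":9000", Handler: corsHandler(router)}
logger.LogIf(GlobalContext, srv.ListenAndServe())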
@@ -26,12 +26,15 @@ import (
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"

xhttp "github.com/minio/minio/cmd/http"
xjwt "github.com/minio/minio/cmd/jwt"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -118,9 +121,7 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown
}

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
@@ -129,7 +130,7 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, s3Err
return cred, nil, owner, s3Err
}

// we only support V4 (no presign) with auth body
@@ -139,14 +140,24 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
}

var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}

return cred, claims, owner, ErrNone
}

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -173,15 +184,13 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r)
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()

token := getSessionToken(r)
if token == "" {
return claims.Map(), nil
}
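With the token now a parameter, callers pair getClaimsFromToken with getSessionToken explicitly; a minimal call-site sketch:

// Sketch: the caller extracts the session token and passes it in.
claims, err := getClaimsFromToken(r, getSessionToken(r))
if err != nil {
	// treat an undecodable token as an authentication failure
	return nil, errAuthentication
}
_ = claims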
@@ -204,15 +213,16 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {

if globalPolicyOPA == nil {
// If OPA is not set and if ldap claim key is set, allow the claim.
if _, ok := claims.Lookup(ldapUser); ok {
if _, ok := claims.MapClaims[ldapUser]; ok {
return claims.Map(), nil
}

// If OPA is not set, session token should
// have a policy and it's mandatory, reject
// requests without policy claim.
_, pok := claims.Lookup(iamPolicyClaimName())
if !pok {
_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
if !pokOpenID && !pokSA {
return nil, errAuthentication
}

@@ -226,7 +236,7 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(context.Background(), err, logger.Application)
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -241,12 +251,15 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r)
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(context.Background(), err)
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}
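The comparison above uses subtle.ConstantTimeCompare so the check takes the same time whether the first or the last byte differs; a standalone sketch of the same idea:

// Sketch: constant-time token equality, as used in checkClaimsFromToken.
func tokenEqualSketch(got, want string) bool {
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}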
@@ -271,7 +284,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var cred auth.Credentials
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrAccessDenied
return accessKey, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
@@ -333,7 +346,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return accessKey, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
@@ -347,7 +360,8 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return accessKey, owner, ErrAccessDenied

return cred.AccessKey, owner, ErrAccessDenied
}

// Verify if request has valid AWS Signature Version '2'.
@@ -410,7 +424,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
if err != nil {
return toAPIErrorCode(ctx, err)
}
r.Body = ioutil.NopCloser(reader)
r.Body = reader
return ErrNone
}

@@ -460,7 +474,107 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}

func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, nil, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}

claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}

return cred, owner, claims, ErrNone
}
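validateSignature folds the V2/V4 branches and the claims check into one call; a hedged sketch of how a handler might consume it:

// Sketch of a call site; the surrounding handler is illustrative.
cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
if s3Err != ErrNone {
	writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
	return
}
_, _, _ = cred, owner, claims // use credentials, ownership and claims below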

func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
conditions := getConditionValues(r, "", "", nil)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
})
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.PutObjectRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}

conditions := getConditionValues(r, "", cred.AccessKey, claims)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
IsOwner: owner,
Claims: claims,
})
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
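Both branches feed the same condition keys into the policy engine; for a GOVERNANCE-mode request with 30 days of retention left, the map would look roughly like this (values illustrative):

// Sketch: condition values for a GOVERNANCE retention request.
conditions := map[string][]string{
	"object-lock-mode":                     {"GOVERNANCE"},
	"object-lock-retain-until-date":        {time.Now().AddDate(0, 0, 30).Format(time.RFC3339)},
	"object-lock-remaining-retention-days": {"30"},
}
_ = conditions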

// isPutActionAllowed - check if PUT operation is allowed on the resource, this
@@ -471,7 +585,7 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
var owner bool
switch atype {
case authTypeUnknown:
return ErrAccessDenied
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
@@ -487,10 +601,19 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
return s3Err
}

// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set
if action == iampolicy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}

if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,

@@ -391,6 +391,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
}
}

func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -425,3 +426,48 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
}
}

func TestValidateAdminSignature(t *testing.T) {

ctx := context.Background()

objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)

if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable to initialize config file, %s", err)
}

creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable to create credential, %s", err)
}
globalActiveCred = creds

testCases := []struct {
AccessKey string
SecretKey string
ErrCode APIErrorCode
}{
{"", "", ErrInvalidAccessKeyID},
{"admin", "", ErrSignatureDoesNotMatch},
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
{"", "mypassword", ErrInvalidAccessKeyID},
{"admin", "mypassword", ErrNone},
}

for i, testCase := range testCases {
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to initialize new signed http request %s", err)
}
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}
}
}

@@ -18,6 +18,7 @@ package cmd

import (
"context"
"path"
"time"

"github.com/minio/minio/cmd/logger"
@@ -29,8 +30,10 @@ import (
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
path string
opts madmin.HealOpts
bucket string
object string
versionID string
opts madmin.HealOpts
// Healing response will be sent here
responseCh chan healResult
}
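A heal request is now addressed by explicit fields instead of a packed path; a sketch of queueing one object heal and waiting on its result (bucket and object names illustrative):

// Sketch: enqueue a single-object heal and block for the outcome.
respCh := make(chan healResult)
globalBackgroundHealRoutine.tasks <- healTask{
	bucket:     "mybucket",       // illustrative
	object:     "path/to/object", // illustrative
	versionID:  "",               // empty selects the latest version
	opts:       madmin.HealOpts{ScanMode: madmin.HealNormalScan},
	responseCh: respCh,
}
res := <-respCh
logger.LogIf(GlobalContext, res.err)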
@@ -66,8 +69,7 @@ func waitForLowHTTPReq(tolerance int32) {
}

// Wait for heal requests and process them
func (h *healRoutine) run() {
ctx := context.Background()
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
@@ -76,31 +78,33 @@ func (h *healRoutine) run() {
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.Nodes()))
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))

var res madmin.HealResultItem
var err error
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
res, err = bgHealDiskFormat(ctx, task.opts)
case bucket != "" && object == "":
res, err = bgHealBucket(ctx, bucket, task.opts)
case bucket != "" && object != "":
res, err = bgHealObject(ctx, bucket, object, task.opts)
case task.bucket == nopHeal:
continue
case task.bucket == SlashSeparator:
res, err = healDiskFormat(ctx, objAPI, task.opts)
case task.bucket != "" && task.object == "":
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove)
case task.bucket != "" && task.object != "":
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
if task.responseCh != nil {
task.responseCh <- healResult{result: res, err: err}
if task.bucket != "" && task.object != "" {
ObjectPathUpdated(path.Join(task.bucket, task.object))
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
}

func initHealRoutine() *healRoutine {
func newHealRoutine() *healRoutine {
return &healRoutine{
tasks: make(chan healTask),
doneCh: make(chan struct{}),
@@ -108,45 +112,28 @@ func initHealRoutine() *healRoutine {

}

func startBackgroundHealing() {
ctx := context.Background()
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)

var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
nh := newBgHealSequence()
// Heal any disk format and metadata early, if possible.
if err := nh.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(nh.ctx, err)
}
break
}

// Run the background healer
globalBackgroundHealRoutine = initHealRoutine()
go globalBackgroundHealRoutine.run()

// Launch the background healer sequence to track
// background healing operations
info := objAPI.StorageInfo(ctx, false)
numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
nh := newBgHealSequence(numDisks)
globalBackgroundHealState.LaunchNewHealSequence(nh)
}

func initBackgroundHealing() {
go startBackgroundHealing()
}
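With the polling loop removed, initBackgroundHealing assumes the object layer is already initialized; a start-up wiring sketch under that assumption:

// Sketch: start background healing once the object layer is ready.
ctx, cancel := context.WithCancel(GlobalContext)
defer cancel() // cancelling ctx stops run() via <-ctx.Done()
initBackgroundHealing(ctx, objAPI) // objAPI: the initialized ObjectLayer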

// bgHealDiskFormat - heals format.json, return value indicates if a
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}

res, err := objectAPI.HealFormat(ctx, opts.DryRun)
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)

// return any error, ignore error returned when disks have
// already healed.
@@ -157,34 +144,21 @@ func bgHealDiskFormat(ctx context.Context, opts madmin.HealRes
// Healing succeeded notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
// Notify servers in background and retry if needed.
go func() {
retry:
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
if nerr.Err.Error() == errServerNotInitialized.Error() {
time.Sleep(time.Second)
goto retry
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
}()
}

return res, nil
}

// bgHealBucket - traverses and heals given bucket
func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}

return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
}

// bgHealObject - heal the given object and record result
func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
}

@@ -25,32 +25,19 @@ import (

const defaultMonitorNewDiskInterval = time.Minute * 10

func initLocalDisksAutoHeal() {
go monitorLocalDisksAndHeal()
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}
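The disk monitor follows the same contract, so both background services can share one lifecycle context; a sketch:

// Sketch: the same ctx/objAPI pair drives the disk monitor.
initLocalDisksAutoHeal(ctx, objAPI)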

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal() {
// Wait until the object layer is ready
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}

z, ok := objAPI.(*xlZones)
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureZones)
if !ok {
return
}

ctx := context.Background()

var bgSeq *healSequence
var found bool

@@ -64,64 +51,74 @@ func monitorLocalDisksAndHeal() {

// Perform automatic disk healing when a disk is replaced locally.
for {
time.Sleep(defaultMonitorNewDiskInterval)

// Attempt a heal as the server starts up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
if len(localDisksToHeal) == 0 {

// Reformat disks only if needed.
if !healNewDisks {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
}

// Reformat disks
bgSeq.sourceCh <- SlashSeparator
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- nopHeal
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}

var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}

// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
}
}
}
}

@@ -28,11 +28,6 @@ import (
humanize "github.com/dustin/go-humanize"
)

// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
return prepareTestBackend(instanceType)
}

// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@@ -40,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
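MakeBucketWithLocation now takes a BucketOptions struct instead of a bare location string; under that reading, a non-default region would be requested like this (the Location field name is assumed, not confirmed by this diff):

// Sketch: the options struct replaces the positional location argument.
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{
	Location: "us-east-1", // illustrative region
})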
@@ -81,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -132,10 +127,12 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
b.StopTimer()
}

// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -146,10 +143,12 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
runPutObjectPartBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -160,10 +159,12 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
runPutObjectBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -180,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -189,7 +190,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {

// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
// PutObject is the function which writes the data onto the FS/Erasure backend.

// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
@@ -239,10 +240,12 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}

// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -253,10 +256,12 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
runGetObjectBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -273,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -317,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -326,7 +331,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
// PutObject is the function which writes the data onto the FS/Erasure backend.

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

@@ -18,7 +18,6 @@ package cmd

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -27,6 +26,14 @@ import (
"github.com/minio/minio/cmd/logger"
)

type errHashMismatch struct {
message string
}

func (err *errHashMismatch) Error() string {
return err.message
}
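Promoting the mismatch to a named type lets callers separate bitrot from transient I/O errors; a detection sketch (reader and buf are illustrative):

// Sketch: distinguish a bitrot mismatch at a call site.
var hashErr *errHashMismatch
if _, err := reader.ReadAt(buf, 0); errors.As(err, &hashErr) {
	// checksum mismatch: schedule a heal rather than a retry
}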

// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
@@ -132,9 +139,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err = fmt.Errorf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))
logger.LogIf(context.Background(), err)
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))

@@ -17,7 +17,6 @@
package cmd

import (
"context"
"hash"
"io"

@@ -36,12 +35,12 @@ type wholeBitrotWriter struct {
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
return len(p), nil
@@ -70,14 +69,14 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
logger.LogIf(GlobalContext, errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)

@@ -17,7 +17,6 @@
package cmd

import (
"context"
"errors"
"hash"
"io"
@@ -31,25 +30,6 @@ import (
// magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")

// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint

const (
// SHA256 represents the SHA-256 hash function
SHA256 BitrotAlgorithm = 1 + iota
// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
HighwayHash256S
// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512
)

// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
DefaultBitrotAlgorithm = HighwayHash256S
)

var bitrotAlgorithms = map[BitrotAlgorithm]string{
SHA256: "sha256",
BLAKE2b512: "blake2b",
@@ -72,7 +52,7 @@ func (a BitrotAlgorithm) New() hash.Hash {
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
return nil
}
}
@@ -88,7 +68,7 @@ func (a BitrotAlgorithm) Available() bool {
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
}
return name
}
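Putting the pieces together, computing a checksum with the default algorithm is roughly (block is an illustrative payload):

// Sketch: hash one block with the default bitrot algorithm.
h := DefaultBitrotAlgorithm.New() // HighwayHash256S per the const above
h.Write(block)                    // feed the payload into the hasher
sum := h.Sum(nil)                 // 32-byte checksum stored next to the data
_ = sum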

@@ -34,7 +34,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
volume := "testvol"
filePath := "testfile"

disk, err := newPosix(tmpDir)
disk, err := newXLStorage(tmpDir, "")
if err != nil {
t.Fatal(err)
}

@@ -20,16 +20,16 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"runtime"
"sync/atomic"
"time"

"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/rest"
@@ -43,6 +43,7 @@ const (
)

const (
bootstrapRESTMethodHealth = "/health"
bootstrapRESTMethodVerify = "/verify"
)

@@ -62,9 +63,9 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}
if s1.MinioEndpoints.Nodes() != s2.MinioEndpoints.Nodes() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.Nodes(),
s2.MinioEndpoints.Nodes())
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
}

for i, ep := range s1.MinioEndpoints {
@@ -94,6 +95,9 @@ func getServerSystemCfg() ServerSystemConfig {
}
}

// HealthHandler returns success if request is valid
func (b *bootstrapRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request) {}

func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "VerifyHandler")
cfg := getServerSystemCfg()
@@ -106,37 +110,23 @@ func registerBootstrapRESTHandlers(router *mux.Router) {
server := &bootstrapRESTServer{}
subrouter := router.PathPrefix(bootstrapRESTPrefix).Subrouter()

subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodHealth).HandlerFunc(
httpTraceHdrs(server.HealthHandler))

subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodVerify).HandlerFunc(
httpTraceHdrs(server.VerifyHandler))
}

// client to talk to bootstrap Nodes.
// client to talk to bootstrap NEndpoints.
type bootstrapRESTClient struct {
endpoint Endpoint
restClient *rest.Client
connected int32
}

// Reconnect to a bootstrap rest server.
func (client *bootstrapRESTClient) reConnect() {
atomic.StoreInt32(&client.connected, 1)
}

// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(context.Background(), method, values, body, length)
}

// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
if !client.IsOnline() {
client.reConnect()
}

if values == nil {
values = make(url.Values)
}
@@ -146,10 +136,6 @@ func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method s
return respBody, nil
}

if isNetworkError(err) {
atomic.StoreInt32(&client.connected, 0)
}

return nil, err
}

@@ -158,24 +144,12 @@ func (client *bootstrapRESTClient) String() string {
return client.endpoint.String()
}

// IsOnline - returns whether RPC client failed to connect or not.
func (client *bootstrapRESTClient) IsOnline() bool {
return atomic.LoadInt32(&client.connected) == 1
}

// Close - marks the client as closed.
func (client *bootstrapRESTClient) Close() error {
atomic.StoreInt32(&client.connected, 0)
client.restClient.Close()
return nil
}

// Verify - fetches system server config.
func (client *bootstrapRESTClient) Verify(srcCfg ServerSystemConfig) (err error) {
func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSystemConfig) (err error) {
if newObjectLayerFn() != nil {
return nil
}
respBody, err := client.call(bootstrapRESTMethodVerify, nil, nil, -1)
respBody, err := client.callWithContext(ctx, bootstrapRESTMethodVerify, nil, nil, -1)
if err != nil {
return
}
@@ -187,14 +161,17 @@ func (client *bootstrapRESTClient) Verify(srcCfg ServerSystemConfig) (err error)
return srcCfg.Diff(recvCfg)
}

func verifyServerSystemConfig(endpointZones EndpointZones) error {
func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointZones)
var onlineServers int
var offlineEndpoints []string
var retries int
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(srcCfg); err != nil {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if isNetworkError(err) {
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
}
return fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err)
@@ -204,6 +181,14 @@ func verifyServerSystemConfig(endpointZones EndpointZones) error {
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(500 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for at least %d servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
}
return nil
}
@@ -220,11 +205,7 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient

// Only proceed for remote endpoints.
if !endpoint.IsLocal {
clnt, err := newBootstrapRESTClient(endpoint)
if err != nil {
continue
}
clnts = append(clnts, clnt)
clnts = append(clnts, newBootstrapRESTClient(endpoint))
}
}
}
@@ -232,7 +213,7 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient
}

// Returns a new bootstrap client.
func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
serverURL := &url.URL{
Scheme: endpoint.Scheme,
Host: endpoint.Host,
@@ -247,11 +228,18 @@ func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
}
}

trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient := rest.NewClient(serverURL, trFn, newAuthToken)
restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
// Instantiate a new rest client for healthcheck
// to avoid recursive healthCheckFn()
respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).CallWithContext(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
cancel()
var ne *rest.NetworkError
return !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &ne)
}

return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient, connected: 1}, nil
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
}
|
||||
|
||||
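Note on the bootstrap flow above: verifyServerSystemConfig blocks startup until at least half of the remote peers report a matching configuration, backing off 500ms per round and logging every fifth retry. A self-contained sketch of that quorum-wait shape follows; the verifier interface, errNetwork sentinel, and waitForQuorum name are illustrative stand-ins for the diff's *bootstrapRESTClient plumbing, not part of the change itself.

package main

import (
	"errors"
	"fmt"
	"time"
)

// verifier is a stand-in for *bootstrapRESTClient: a peer that can
// confirm it shares this server's configuration.
type verifier interface {
	Verify() error
	String() string
}

var errNetwork = errors.New("network error")

// waitForQuorum retries until at least half the peers verify, sleeping
// between rounds so offline peers do not spin the CPU, and logging
// every fifth round - the same shape as verifyServerSystemConfig.
func waitForQuorum(peers []verifier) error {
	retries := 0
	for {
		online := 0
		var offline []string
		for _, p := range peers {
			if err := p.Verify(); err != nil {
				if errors.Is(err, errNetwork) {
					offline = append(offline, p.String())
					continue
				}
				return fmt.Errorf("%s has incorrect configuration: %w", p.String(), err)
			}
			online++
		}
		if online >= len(peers)/2 {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
		if retries++; retries >= 5 {
			fmt.Printf("waiting for at least %d servers, offline: %v\n", len(peers)/2, offline)
			retries = 0
		}
	}
}

func main() {
	fmt.Println(waitForQuorum(nil)) // zero peers: a quorum of zero is satisfied immediately
}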
@@ -18,13 +18,12 @@ package cmd

import (
"encoding/xml"
"fmt"
"io"
"net/http"

"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/policy"
)

@@ -46,15 +45,14 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

// PutBucketEncyrption API requires Content-Md5
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
if !objAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -69,7 +67,12 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
// Parse bucket encryption xml
encConfig, err := validateBucketSSEConfig(io.LimitReader(r.Body, maxBucketSSEConfigSize))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
apiErr := APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}

@@ -79,17 +82,17 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
return
}

// Store the bucket encryption configuration in the object layer
if err = objAPI.SetBucketSSEConfig(ctx, bucket, encConfig); err != nil {
configData, err := xml.Marshal(encConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Update the in-memory bucket encryption cache
globalBucketSSEConfigSys.Set(bucket, *encConfig)

// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.SetBucketSSEConfig(ctx, bucket, encConfig)
// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

writeSuccessResponseHeadersOnly(w)
}
@@ -122,21 +125,20 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
return
}

// Fetch bucket encryption configuration from object layer
var encConfig *bucketsse.BucketSSEConfig
if encConfig, err = objAPI.GetBucketSSEConfig(ctx, bucket); err != nil {
config, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

var encConfigData []byte
if encConfigData, err = xml.Marshal(encConfig); err != nil {
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write bucket encryption configuration to client
writeSuccessResponseXML(w, encConfigData)
writeSuccessResponseXML(w, configData)
}

// DeleteBucketEncryptionHandler - Removes bucket encryption configuration
@@ -167,15 +169,10 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}

// Delete bucket encryption config from object layer
if err = objAPI.DeleteBucketSSEConfig(ctx, bucket); err != nil {
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Remove entry from the in-memory bucket encryption cache
globalBucketSSEConfigSys.Remove(bucket)
// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.RemoveBucketSSEConfig(ctx, bucket)

writeSuccessNoContent(w)
}

@@ -17,142 +17,32 @@
package cmd

import (
"bytes"
"context"
"encoding/xml"
"errors"
"io"
"path"
"sync"

bucketsse "github.com/minio/minio/pkg/bucket/encryption"
)

// BucketSSEConfigSys - in-memory cache of bucket encryption config
type BucketSSEConfigSys struct {
sync.RWMutex
bucketSSEConfigMap map[string]bucketsse.BucketSSEConfig
}
type BucketSSEConfigSys struct{}

// NewBucketSSEConfigSys - Creates an empty in-memory bucket encryption configuration cache
func NewBucketSSEConfigSys() *BucketSSEConfigSys {
return &BucketSSEConfigSys{
bucketSSEConfigMap: make(map[string]bucketsse.BucketSSEConfig),
}
}

// load - Loads the bucket encryption configuration for the given list of buckets
func (sys *BucketSSEConfigSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
config, err := objAPI.GetBucketSSEConfig(context.Background(), bucket.Name)
if err != nil {
if _, ok := err.(BucketSSEConfigNotFound); ok {
sys.Remove(bucket.Name)
}
continue
}
sys.Set(bucket.Name, *config)
}

return nil
}

// Init - Initializes in-memory bucket encryption config cache for the given list of buckets
func (sys *BucketSSEConfigSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}

// We don't cache bucket encryption config in gateway mode, nothing to do.
if globalIsGateway {
return nil
}

// Load bucket encryption config cache once during boot.
return sys.load(buckets, objAPI)
return &BucketSSEConfigSys{}
}

// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (config bucketsse.BucketSSEConfig, ok bool) {
// We don't cache bucket encryption config in gateway mode.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return
return nil, errServerNotInitialized
}

cfg, err := objAPI.GetBucketSSEConfig(context.Background(), bucket)
if err != nil {
return
}
return *cfg, true
return nil, BucketSSEConfigNotFound{Bucket: bucket}
}

sys.Lock()
defer sys.Unlock()
config, ok = sys.bucketSSEConfigMap[bucket]
return
}

// Set - sets bucket encryption config to given bucket name.
func (sys *BucketSSEConfigSys) Set(bucket string, config bucketsse.BucketSSEConfig) {
// We don't cache bucket encryption config in gateway mode.
if globalIsGateway {
return
}

sys.Lock()
defer sys.Unlock()
sys.bucketSSEConfigMap[bucket] = config
}

// Remove - removes bucket encryption config for given bucket.
func (sys *BucketSSEConfigSys) Remove(bucket string) {
sys.Lock()
defer sys.Unlock()

delete(sys.bucketSSEConfigMap, bucket)
}

// saveBucketSSEConfig - save bucket encryption config for given bucket.
func saveBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *bucketsse.BucketSSEConfig) error {
data, err := xml.Marshal(config)
if err != nil {
return err
}

// Path to store bucket encryption config for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
return saveConfig(ctx, objAPI, configFile, data)
}

// getBucketSSEConfig - get bucket encryption config for given bucket.
func getBucketSSEConfig(objAPI ObjectLayer, bucket string) (*bucketsse.BucketSSEConfig, error) {
// Path to bucket-encryption.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
configData, err := readConfig(context.Background(), objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
err = BucketSSEConfigNotFound{Bucket: bucket}
}
return nil, err
}

return bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
}

// removeBucketSSEConfig - removes bucket encryption config for given bucket.
func removeBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
// Path to bucket-encryption.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)

if err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile); err != nil {
if _, ok := err.(ObjectNotFound); ok {
return BucketSSEConfigNotFound{Bucket: bucket}
}
return err
}
return nil
return globalBucketMetadataSys.GetSSEConfig(bucket)
}

// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
@@ -165,5 +55,6 @@ func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
return encConfig, nil
}

return nil, errors.New("Unsupported bucket encryption configuration")
}
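For reference, validateBucketSSEConfig above accepts only a configuration with exactly one rule whose default algorithm is AES256. A minimal standalone sketch of that parse-and-validate step, with hypothetical trimmed-down types standing in for the bucketsse package:

package main

import (
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"strings"
)

// rule and sseConfig are invented, cut-down stand-ins for the
// bucketsse package types used in the diff.
type rule struct {
	Algorithm string `xml:"ApplyServerSideEncryptionByDefault>SSEAlgorithm"`
}

type sseConfig struct {
	Rules []rule `xml:"Rule"`
}

// validate mirrors the AES256-only check in validateBucketSSEConfig.
func validate(r io.Reader) (*sseConfig, error) {
	var cfg sseConfig
	if err := xml.NewDecoder(r).Decode(&cfg); err != nil {
		return nil, err
	}
	if len(cfg.Rules) == 1 && cfg.Rules[0].Algorithm == "AES256" {
		return &cfg, nil
	}
	return nil, errors.New("unsupported bucket encryption configuration")
}

func main() {
	doc := `<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`
	cfg, err := validate(strings.NewReader(doc))
	fmt.Println(cfg, err) // &{[{AES256}]} <nil>
}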
@@ -17,7 +17,6 @@
package cmd

import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
@@ -26,11 +25,13 @@ import (
"net/url"
"path"
"path/filepath"
"strconv"
"strings"

"github.com/gorilla/mux"

"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
@@ -45,10 +46,8 @@ import (
)

const (
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
objectLockConfig = "object-lock.xml"
bucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
bucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
objectLockConfig = "object-lock.xml"
bucketTaggingConfig = "tagging.xml"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -70,7 +69,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return
}

@@ -118,12 +117,12 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

for _, err := range g.Wait() {
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
}
}

for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(context.Background(), fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
}

// Remove buckets that are in DNS for this server, but aren't local
@@ -140,7 +139,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err = globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("Failed to remove DNS entry for %s due to %w",
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}
@@ -266,7 +265,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
listBuckets := objectAPI.ListBuckets

accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone {
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -295,32 +294,43 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
}
}

// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
if s3Error == ErrAccessDenied {
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")

// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)

// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r)
var newBucketsInfo []BucketInfo
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
newBucketsInfo = append(newBucketsInfo, bucketInfo)
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r, getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
bucketsInfo[n] = bucketInfo
n++
}
}
bucketsInfo = bucketsInfo[:n]
// No buckets can be filtered return access denied error.
if len(bucketsInfo) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}

// Generate response.
response := generateListBucketsResponse(newBucketsInfo)
response := generateListBucketsResponse(bucketsInfo)
encodedSuccessResponse := encodeResponse(response)

// Write response.
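The rewritten ListBucketsHandler filters the bucket list in place rather than appending to a second slice, following the SliceTricks pattern linked in the code comment. A small self-contained illustration of that pattern (the keep helper and sample data are invented for the example):

package main

import "fmt"

// keep filters s in place without allocating a second slice - the same
// filter-in-place idiom the handler above applies to bucketsInfo.
func keep(s []string, pred func(string) bool) []string {
	n := 0
	for _, v := range s {
		if pred(v) {
			s[n] = v
			n++
		}
	}
	return s[:n]
}

func main() {
	buckets := []string{"public", "private", "logs"}
	allowed := keep(buckets, func(b string) bool { return b != "private" })
	fmt.Println(allowed) // [public logs]
}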
@@ -367,77 +377,98 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Before proceeding validate if bucket exists.
_, err := objectAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

deleteObjectsFn := objectAPI.DeleteObjects
if api.CacheAPI() != nil {
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

var objectsToDelete = map[string]int{}
var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
}

var dErrs = make([]APIErrorCode, len(deleteObjects.Objects))

dErrs := make([]DeleteError, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
if dErrs[index] = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); dErrs[index] != ErrNone {
if dErrs[index] == ErrSignatureDoesNotMatch || dErrs[index] == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(dErrs[index]), r.URL, guessIsBrowserReq(r))
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
return
}
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
}
govBypassPerms := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName)
if _, err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn, govBypassPerms); err != ErrNone {
dErrs[index] = err
continue

if object.VersionID != "" {
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
}
}
}

// Avoid duplicate objects, we use map to filter them out.
if _, ok := objectsToDelete[object.ObjectName]; !ok {
objectsToDelete[object.ObjectName] = index
if _, ok := objectsToDelete[object]; !ok {
objectsToDelete[object] = index
}
}

toNames := func(input map[string]int) (output []string) {
output = make([]string, len(input))
toNames := func(input map[ObjectToDelete]int) (output []ObjectToDelete) {
output = make([]ObjectToDelete, len(input))
idx := 0
for name := range input {
output[idx] = name
for obj := range input {
output[idx] = obj
idx++
}
return
}

deleteList := toNames(objectsToDelete)
errs, err := deleteObjectsFn(ctx, bucket, deleteList)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
})

for i, objName := range deleteList {
dIdx := objectsToDelete[objName]
dErrs[dIdx] = toAPIErrorCode(ctx, errs[i])
}

// Collect deleted objects and errors if any.
var deletedObjects []ObjectIdentifier
var deleteErrors []DeleteError
for index, errCode := range dErrs {
object := deleteObjects.Objects[index]
// Success deleted objects are collected separately.
if errCode == ErrNone || errCode == ErrNoSuchKey {
deletedObjects = append(deletedObjects, object)
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[deleteList[i]]
apiErr := toAPIError(ctx, errs[i])
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
deletedObjects[dindex] = dObjects[i]
continue
}
apiErr := getAPIError(errCode)
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
})
dErrs[dindex] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: deleteList[i].ObjectName,
VersionID: deleteList[i].VersionID,
}
}

var deleteErrors []DeleteError
for _, dErr := range dErrs {
if dErr.Code != "" {
deleteErrors = append(deleteErrors, dErr)
}
}

// Generate response
@@ -449,12 +480,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

// Notify deleted event for objects.
for _, dobj := range deletedObjects {
objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
}
if dobj.DeleteMarker {
objInfo = ObjectInfo{
Name: dobj.ObjectName,
DeleteMarker: dobj.DeleteMarker,
VersionID: dobj.DeleteMarkerVersionID,
}
}
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
Name: dobj.ObjectName,
},
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
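Worth noting in the hunk above: objectsToDelete is now keyed by the ObjectToDelete struct itself rather than by the object name, so the same name with different version IDs deduplicates correctly. A toy sketch of struct-keyed deduplication (the type and helper names here are illustrative, not the diff's own):

package main

import "fmt"

// objectToDelete contains only comparable fields, so values can key a
// map directly - the property the handler above relies on to
// deduplicate name+version pairs instead of names alone.
type objectToDelete struct {
	ObjectName string
	VersionID  string
}

// dedup keeps the first occurrence of each name+version pair.
func dedup(objects []objectToDelete) []objectToDelete {
	seen := map[objectToDelete]int{}
	var out []objectToDelete
	for i, obj := range objects {
		if _, ok := seen[obj]; !ok {
			seen[obj] = i
			out = append(out, obj)
		}
	}
	return out
}

func main() {
	in := []objectToDelete{{"a", ""}, {"a", ""}, {"a", "v1"}}
	fmt.Println(dedup(in)) // [{a } {a v1}]
}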
@@ -509,30 +549,30 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return
}

opts := BucketOptions{
Location: location,
LockEnabled: objectLockEnabled,
}

if globalDNSConfig != nil {
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if objectLockEnabled {
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}

if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
objectAPI.DeleteBucket(ctx, bucket, false)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Load updated bucket metadata into memory.
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location,
getObjectLocation(r, globalDomainNames, bucket, ""))
@@ -557,21 +597,14 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}

// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if objectLockEnabled && !globalIsGateway {
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
globalBucketObjectLockConfig.Set(bucket, objectlock.Retention{})
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, objectlock.Retention{})
}
// Load updated bucket metadata into memory.
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.
@@ -606,7 +639,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
bucket := mux.Vars(r)["bucket"]

// To detect if the client has disconnected.
r.Body = &detectDisconnect{r.Body, r.Context().Done()}
r.Body = &contextReader{r.Body, r.Context()}

// Require Content-Length to be set in the request
size := r.ContentLength
@@ -742,14 +775,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
var objectEncryptionKey crypto.ObjectKey

// Check if bucket encryption is enabled
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
// This request header needs to be set prior to setting ObjectOptions
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
// This request header needs to be set prior to setting ObjectOptions
if !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
}

// get gateway encryption options
var opts ObjectOptions
opts, err = putOpts(ctx, r, bucket, object, metadata)
@@ -784,7 +819,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}

@@ -794,9 +829,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}

location := getObjectLocation(r, globalDomainNames, bucket, object)
// We must not use the http.Header().Set method here because some (broken)
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
// Therefore, we have to set the ETag directly as map entry.
w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
w.Header().Set(xhttp.Location, location)

// Set the relevant version ID as part of the response header.
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}

w.Header().Set(xhttp.Location, getObjectLocation(r, globalDomainNames, bucket, object))

// Notify object created event.
defer sendEvent(eventArgs{
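The ETag comment above hinges on a Go standard-library detail: http.Header.Set canonicalizes keys, turning "ETag" into "Etag", so the handler assigns the map entry directly to keep the exact casing. A tiny runnable demonstration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	viaSet := http.Header{}
	viaSet.Set("ETag", `"abc"`) // Set canonicalizes the key to "Etag"

	direct := http.Header{}
	direct["ETag"] = []string{`"abc"`} // map assignment keeps "ETag" verbatim

	for k := range viaSet {
		fmt.Println("Set stored key:", k) // Set stored key: Etag
	}
	for k := range direct {
		fmt.Println("direct stored key:", k) // direct stored key: ETag
	}
}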
@@ -823,9 +866,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
Bucket: objInfo.Bucket,
Key: objInfo.Name,
ETag: `"` + objInfo.ETag + `"`,
Location: location,
Location: w.Header().Get(xhttp.Location),
})
writeResponse(w, http.StatusCreated, resp, "application/xml")
writeResponse(w, http.StatusCreated, resp, mimeXML)
case "200":
writeSuccessResponseHeadersOnly(w)
default:
@@ -883,88 +926,66 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
return
}

// Verify if the caller has sufficient permissions.
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

forceDelete := false
if value := r.Header.Get(xhttp.MinIOForceDelete); value != "" {
var err error
forceDelete, err = strconv.ParseBool(value)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrInvalidRequest)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}

// if force delete header is set, we need to evaluate the policy anyways
// regardless of it being true or not.
if s3Error := checkRequestAuthType(ctx, r, policy.ForceDeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

if forceDelete {
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
}
}

deleteBucket := objectAPI.DeleteBucket

// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
if _, ok := err.(BucketNotEmpty); ok && (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) {
apiErr := toAPIError(ctx, err)
apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
}
return
}

globalNotificationSys.DeleteBucketMetadata(ctx, bucket)

if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}

globalNotificationSys.DeleteBucket(ctx, bucket)

// Write success response.
writeSuccessNoContent(w)
}

// PutBucketVersioningHandler - PUT Bucket Versioning.
// ----------
// No-op. Available for API compatibility.
func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketVersioning")

defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

getBucketInfo := objectAPI.GetBucketInfo
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// GetBucketVersioningHandler - GET Bucket Versioning.
// ----------
// No-op. Available for API compatibility.
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketVersioning")

defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

getBucketInfo := objectAPI.GetBucketInfo
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseXML(w, []byte(getBucketVersioningResponse))
}

// PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
// ----------
// Places an Object Lock configuration on the specified bucket. The rule
@@ -984,15 +1005,11 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

config, err := objectlock.ParseObjectLockConfig(r.Body)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
@@ -1000,39 +1017,23 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)

configData, err := xml.Marshal(config)
if err != nil {
aerr := toAPIError(ctx, err)
if err == errConfigNotFound {
aerr = errorCodes.ToAPIErr(ErrObjectLockConfigurationNotAllowed)
}
writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if string(configData) != bucketObjectLockEnabledConfig {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
return
}
data, err := xml.Marshal(config)
if err != nil {
// Deny object locking configuration settings on existing buckets without object lock enabled.
if _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {

if err = globalBucketMetadataSys.Update(bucket, objectLockConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if config.Rule != nil {
retention := config.ToRetention()
globalBucketObjectLockConfig.Set(bucket, retention)
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, retention)
} else {
globalBucketObjectLockConfig.Remove(bucket)
globalNotificationSys.RemoveBucketObjectLockConfig(ctx, bucket)
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
@@ -1056,43 +1057,137 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)

config, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
var aerr APIError
if err == errConfigNotFound {
aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
} else {
aerr = toAPIError(ctx, err)
}
writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if string(configData) != bucketObjectLockEnabledConfig {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
return
}

configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
configData, err = readConfig(ctx, objectAPI, configFile)
configData, err := xml.Marshal(config)
if err != nil {
if err != errConfigNotFound {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if configData, err = xml.Marshal(objectlock.NewObjectLockConfig()); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseXML(w, configData)
}

// PutBucketTaggingHandler - PUT Bucket tagging.
// ----------
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketTagging")

defer logger.AuditLog(w, r, "PutBucketTagging", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

tags, err := tags.ParseBucketXML(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}

configData, err := xml.Marshal(tags)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// GetBucketTaggingHandler - GET Bucket tagging.
// ----------
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")

defer logger.AuditLog(w, r, "GetBucketTagging", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

config, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseXML(w, configData)
}

// DeleteBucketTaggingHandler - DELETE Bucket tagging.
// ----------
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketTagging")

defer logger.AuditLog(w, r, "DeleteBucketTagging", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

if err := globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}
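Both tagging handlers above round-trip the configuration through encoding/xml before handing it to the bucket metadata subsystem. A self-contained sketch of the marshal step, using hypothetical minimal types in place of the minio-go tags package:

package main

import (
	"encoding/xml"
	"fmt"
)

// tag and tagging are invented, cut-down stand-ins for the tags
// package types the handlers above marshal.
type tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  []tag    `xml:"TagSet>Tag"`
}

func main() {
	cfg := tagging{TagSet: []tag{{Key: "env", Value: "prod"}}}
	configData, err := xml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Prints: <Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>
	fmt.Println(string(configData))
}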
@@ -18,8 +18,8 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -29,7 +29,52 @@ import (
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
)
|
||||
|
||||
// Wrapper for calling GetBucketPolicy HTTP handler tests for both XL multiple disks and single node setup.
|
||||
// Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
|
||||
func TestRemoveBucketHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
|
||||
}
|
||||
|
||||
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
t.Fatalf("Error uploading object: <ERROR> %v", err)
|
||||
}
|
||||
|
||||
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
|
||||
rec := httptest.NewRecorder()
|
||||
// construct HTTP request for DELETE bucket.
|
||||
req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
|
||||
}
|
||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||
// Call the ServeHTTP to execute the handler.
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
switch rec.Code {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
|
||||
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, rec.Code)
|
||||
}
|
||||
|
||||
// Verify response of the V2 signed HTTP request.
|
||||
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
|
||||
recV2 := httptest.NewRecorder()
|
||||
// construct HTTP request for DELETE bucket.
|
||||
reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
|
||||
}
|
||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||
// Call the ServeHTTP to execute the handler.
|
||||
apiRouter.ServeHTTP(recV2, reqV2)
|
||||
switch recV2.Code {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
|
||||
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, recV2.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
|
||||
func TestGetBucketLocationHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
|
||||
}
|
||||
@@ -71,7 +116,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
errorResponse: APIErrorResponse{
|
||||
Resource: SlashSeparator + bucketName + SlashSeparator,
|
||||
Code: "InvalidAccessKeyId",
|
||||
Message: "The access key ID you provided does not exist in our records.",
|
||||
Message: "The Access Key Id you provided does not exist in our records.",
|
||||
},
|
||||
shouldPass: false,
|
||||
},
|
||||
@@ -173,7 +218,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
|
||||
}
|
||||
|
||||
// Wrapper for calling HeadBucket HTTP handler tests for both XL multiple disks and single node setup.
|
||||
// Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
|
||||
func TestHeadBucketHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
|
||||
}
|
||||
@@ -278,7 +323,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
|
||||
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
|
||||
}
|
||||
|
||||
// Wrapper for calling TestListMultipartUploadsHandler tests for both XL multiple disks and single node setup.
|
||||
// Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
|
||||
func TestListMultipartUploadsHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
|
||||
}
|
||||
@@ -515,7 +560,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
|
||||
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
|
||||
}
|
||||
|
||||
// Wrapper for calling TestListBucketsHandler tests for both XL multiple disks and single node setup.
|
||||
// Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
|
||||
func TestListBucketsHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
|
||||
}
|
||||
@@ -609,7 +654,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq)
|
||||
}
|
||||
|
||||
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both XL multiple disks and single node setup.
|
||||
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
|
||||
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
|
||||
}
|
||||
@@ -625,7 +670,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
for i := 0; i < 10; i++ {
|
||||
objectName := "test-object-" + strconv.Itoa(i)
|
||||
// uploading the object.
|
||||
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
|
||||
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
|
||||
@@ -635,14 +680,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
objectNames = append(objectNames, objectName)
|
||||
}
|
||||
|
||||
getObjectIdentifierList := func(objectNames []string) (objectIdentifierList []ObjectIdentifier) {
|
||||
getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) {
|
||||
for _, objectName := range objectNames {
|
||||
objectIdentifierList = append(objectIdentifierList, ObjectIdentifier{objectName})
|
||||
objectList = append(objectList, ObjectToDelete{
|
||||
ObjectName: objectName,
|
||||
})
|
||||
}
|
||||
|
||||
return objectIdentifierList
|
||||
return objectList
|
||||
}
|
||||
getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) {
|
||||
|
||||
getDeleteErrorList := func(objects []ObjectToDelete) (deleteErrorList []DeleteError) {
|
||||
for _, obj := range objects {
|
||||
deleteErrorList = append(deleteErrorList, DeleteError{
|
||||
Code: errorCodes[ErrAccessDenied].Code,
|
||||
@@ -655,22 +703,38 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
}
|
||||
|
||||
requestList := []DeleteObjectsRequest{
|
||||
{Quiet: false, Objects: getObjectIdentifierList(objectNames[:5])},
|
||||
{Quiet: true, Objects: getObjectIdentifierList(objectNames[5:])},
|
||||
{Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])},
|
||||
{Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])},
|
||||
}
|
||||
|
||||
// generate multi objects delete response.
|
||||
successRequest0 := encodeResponse(requestList[0])
|
||||
successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, requestList[0].Objects, nil)
|
||||
|
||||
deletedObjects := make([]DeletedObject, len(requestList[0].Objects))
|
||||
for i := range requestList[0].Objects {
|
||||
deletedObjects[i] = DeletedObject{
|
||||
ObjectName: requestList[0].Objects[i].ObjectName,
|
||||
}
|
||||
}
|
||||
|
||||
successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, deletedObjects, nil)
|
||||
encodedSuccessResponse0 := encodeResponse(successResponse0)
|
||||
|
||||
successRequest1 := encodeResponse(requestList[1])
|
||||
successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil)
|
||||
|
||||
deletedObjects = make([]DeletedObject, len(requestList[1].Objects))
|
||||
for i := range requestList[0].Objects {
|
||||
deletedObjects[i] = DeletedObject{
|
||||
ObjectName: requestList[1].Objects[i].ObjectName,
|
||||
}
|
||||
}
|
||||
|
||||
successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil)
|
||||
encodedSuccessResponse1 := encodeResponse(successResponse1)
|
||||
|
||||
// generate multi objects delete response for errors.
|
||||
// errorRequest := encodeResponse(requestList[1])
|
||||
errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil)
|
||||
errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil)
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
|
||||
anonRequest := encodeResponse(requestList[0])
|
||||
@@ -773,6 +837,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
|
||||
// Verify whether the bucket obtained object is same as the one created.
|
||||
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
|
||||
fmt.Println(string(testCase.expectedContent), string(actualContent))
|
||||
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
|
||||
}
|
||||
}
@@ -72,13 +72,22 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

	if err = objAPI.SetBucketLifecycle(ctx, bucket, bucketLifecycle); err != nil {
	// Validate the received bucket lifecycle document
	if err = bucketLifecycle.Validate(); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalLifecycleSys.Set(bucket, *bucketLifecycle)
	globalNotificationSys.SetBucketLifecycle(ctx, bucket, bucketLifecycle)
	configData, err := xml.Marshal(bucketLifecycle)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Success.
	writeSuccessResponseHeadersOnly(w)
@@ -110,21 +119,20 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

	// Read bucket access lifecycle.
	bucketLifecycle, err := objAPI.GetBucketLifecycle(ctx, bucket)
	config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	lifecycleData, err := xml.Marshal(bucketLifecycle)
	configData, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write lifecycle configuration to client.
	writeSuccessResponseXML(w, lifecycleData)
	writeSuccessResponseXML(w, configData)
}

// DeleteBucketLifecycleHandler - This HTTP handler removes bucket lifecycle configuration.
@@ -153,14 +161,11 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
		return
	}

	if err := objAPI.DeleteBucketLifecycle(ctx, bucket); err != nil {
	if err := globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, nil); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalLifecycleSys.Remove(bucket)
	globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)

	// Success.
	writeSuccessNoContent(w)
}
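
Taken together, the rewritten PUT path is parse, validate, marshal, persist, with the in-process lifecycle and notification updates replaced by a single metadata-subsystem write. A minimal sketch of that flow, assuming the lifecycle package from pkg/bucket/lifecycle; the persist callback is hypothetical and stands in for globalBucketMetadataSys.Update:

    import (
        "encoding/xml"
        "io"

        "github.com/minio/minio/pkg/bucket/lifecycle"
    )

    func putLifecycle(body io.Reader, persist func(configData []byte) error) error {
        // Parse the request body into a lifecycle document.
        bucketLifecycle, err := lifecycle.ParseLifecycleConfig(body)
        if err != nil {
            return err
        }
        // Reject invalid documents before touching the metadata store.
        if err := bucketLifecycle.Validate(); err != nil {
            return err
        }
        // Re-marshal and hand the canonical XML to the metadata subsystem.
        configData, err := xml.Marshal(bucketLifecycle)
        if err != nil {
            return err
        }
        return persist(configData)
    }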
303	cmd/bucket-lifecycle-handlers_test.go	Normal file
@@ -0,0 +1,303 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "bytes"
    "encoding/xml"
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/minio/minio/pkg/auth"
)

// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
    ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}

// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    // test cases with sample input and expected output.
    testCases := []struct {
        method     string
        bucketName string
        accessKey  string
        secretKey  string
        // Sent body
        body []byte
        // Expected response
        expectedRespStatus int
        lifecycleResponse  []byte
        errorResponse      APIErrorResponse
        shouldPass         bool
    }{
        // GET empty credentials
        {
            method: "GET", bucketName: bucketName,
            accessKey:          "",
            secretKey:          "",
            expectedRespStatus: http.StatusForbidden,
            lifecycleResponse:  []byte(""),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "AccessDenied",
                Message:  "Access Denied.",
            },
            shouldPass: false,
        },
        // GET wrong credentials
        {
            method: "GET", bucketName: bucketName,
            accessKey:          "abcd",
            secretKey:          "abcd",
            expectedRespStatus: http.StatusForbidden,
            lifecycleResponse:  []byte(""),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "InvalidAccessKeyId",
                Message:  "The Access Key Id you provided does not exist in our records.",
            },
            shouldPass: false,
        },
        // PUT empty credentials
        {
            method:             "PUT",
            bucketName:         bucketName,
            accessKey:          "",
            secretKey:          "",
            expectedRespStatus: http.StatusForbidden,
            lifecycleResponse:  []byte(""),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "AccessDenied",
                Message:  "Access Denied.",
            },
            shouldPass: false,
        },
        // PUT wrong credentials
        {
            method:             "PUT",
            bucketName:         bucketName,
            accessKey:          "abcd",
            secretKey:          "abcd",
            expectedRespStatus: http.StatusForbidden,
            lifecycleResponse:  []byte(""),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "InvalidAccessKeyId",
                Message:  "The Access Key Id you provided does not exist in our records.",
            },
            shouldPass: false,
        },
        // DELETE empty credentials
        {
            method:             "DELETE",
            bucketName:         bucketName,
            accessKey:          "",
            secretKey:          "",
            expectedRespStatus: http.StatusForbidden,
            lifecycleResponse:  []byte(""),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "AccessDenied",
                Message:  "Access Denied.",
            },
            shouldPass: false,
        },
        // DELETE wrong credentials
        {
            method:             "DELETE",
            bucketName:         bucketName,
            accessKey:          "abcd",
            secretKey:          "abcd",
            expectedRespStatus: http.StatusForbidden,
            lifecycleResponse:  []byte(""),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "InvalidAccessKeyId",
                Message:  "The Access Key Id you provided does not exist in our records.",
            },
            shouldPass: false,
        },
    }

    testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}

// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
    ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}

// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    creds auth.Credentials, t *testing.T) {

    // test cases with sample input and expected output.
    testCases := []struct {
        method     string
        bucketName string
        accessKey  string
        secretKey  string
        // Sent body
        body []byte
        // Expected response
        expectedRespStatus int
        lifecycleResponse  []byte
        errorResponse      APIErrorResponse
        shouldPass         bool
    }{
        // Test case - 1.
        // Filter contains more than one of Prefix, Tag and And
        {
            method:             "PUT",
            bucketName:         bucketName,
            accessKey:          creds.AccessKey,
            secretKey:          creds.SecretKey,
            body:               []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
            expectedRespStatus: http.StatusBadRequest,
            lifecycleResponse:  []byte(``),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "InvalidRequest",
                Message:  "Filter must have exactly one of Prefix, Tag, or And specified",
            },

            shouldPass: false,
        },
        // Date contains wrong format
        {
            method:             "PUT",
            bucketName:         bucketName,
            accessKey:          creds.AccessKey,
            secretKey:          creds.SecretKey,
            body:               []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Date>365</Date></Expiration></Rule></LifecycleConfiguration>`),
            expectedRespStatus: http.StatusBadRequest,
            lifecycleResponse:  []byte(``),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "InvalidRequest",
                Message:  "Date must be provided in ISO 8601 format",
            },

            shouldPass: false,
        },
        {
            method:             "PUT",
            bucketName:         bucketName,
            accessKey:          creds.AccessKey,
            secretKey:          creds.SecretKey,
            body:               []byte(`<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
            expectedRespStatus: http.StatusOK,
            lifecycleResponse:  []byte(``),
            errorResponse:      APIErrorResponse{},
            shouldPass:         true,
        },
        {
            method:             "GET",
            accessKey:          creds.AccessKey,
            secretKey:          creds.SecretKey,
            bucketName:         bucketName,
            body:               []byte(``),
            expectedRespStatus: http.StatusOK,
            lifecycleResponse:  []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Filter><Prefix>logs/</Prefix></Filter><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
            errorResponse:      APIErrorResponse{},
            shouldPass:         true,
        },
        {
            method:             "DELETE",
            accessKey:          creds.AccessKey,
            secretKey:          creds.SecretKey,
            bucketName:         bucketName,
            body:               []byte(``),
            expectedRespStatus: http.StatusNoContent,
            lifecycleResponse:  []byte(``),
            errorResponse:      APIErrorResponse{},
            shouldPass:         true,
        },
        {
            method:             "GET",
            accessKey:          creds.AccessKey,
            secretKey:          creds.SecretKey,
            bucketName:         bucketName,
            body:               []byte(``),
            expectedRespStatus: http.StatusNotFound,
            lifecycleResponse:  []byte(``),
            errorResponse: APIErrorResponse{
                Resource: SlashSeparator + bucketName + SlashSeparator,
                Code:     "NoSuchLifecycleConfiguration",
                Message:  "The lifecycle configuration does not exist",
            },
            shouldPass: false,
        },
    }

    testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
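
The "wrong format" case above is rejected because Expiration's Date must be an ISO 8601 timestamp rather than a day count. For contrast, a body that parses cleanly might look like the following; the date value is hypothetical, and S3 additionally expects such dates to fall at midnight UTC:

    var validDateBody = []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>2021-01-01T00:00:00Z</Date></Expiration></Rule></LifecycleConfiguration>`)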

// testBucketLifecycle is a generic test harness for lifecycle requests
func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    t *testing.T, testCases []struct {
        method             string
        bucketName         string
        accessKey          string
        secretKey          string
        body               []byte
        expectedRespStatus int
        lifecycleResponse  []byte
        errorResponse      APIErrorResponse
        shouldPass         bool
    }) {

    for i, testCase := range testCases {
        // initialize httptest Recorder; it records any mutations to the response writer inside the handler.
        rec := httptest.NewRecorder()
        // construct HTTP request
        req, err := newTestSignedRequestV4(testCase.method, getBucketLifecycleURL("", testCase.bucketName),
            int64(len(testCase.body)), bytes.NewReader(testCase.body), testCase.accessKey, testCase.secretKey, nil)
        if err != nil {
            t.Fatalf("Test %d: %s: Failed to create HTTP request for the bucket lifecycle handler: <ERROR> %v", i+1, instanceType, err)
        }
        // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
        // Call the ServeHTTP to execute the handler.
        apiRouter.ServeHTTP(rec, req)
        if rec.Code != testCase.expectedRespStatus {
            t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
        }
        if testCase.shouldPass && !bytes.Equal(testCase.lifecycleResponse, rec.Body.Bytes()) {
            t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.lifecycleResponse), rec.Body.String())
        }
        errorResponse := APIErrorResponse{}
        err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
        if err != nil && !testCase.shouldPass {
            t.Fatalf("Test %d: %s: Unable to unmarshal response body %s", i+1, instanceType, rec.Body.String())
        }
        if errorResponse.Resource != testCase.errorResponse.Resource {
            t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
        }
        if errorResponse.Message != testCase.errorResponse.Message {
            t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
        }
        if errorResponse.Code != testCase.errorResponse.Code {
            t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
        }
    }
}
48	cmd/bucket-lifecycle.go	Normal file
@@ -0,0 +1,48 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "github.com/minio/minio/pkg/bucket/lifecycle"
)

const (
    // Disabled means the lifecycle rule is inactive
    Disabled = "Disabled"
)

// LifecycleSys - Bucket lifecycle subsystem.
type LifecycleSys struct{}

// Get - gets the lifecycle config associated with the given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
    if globalIsGateway {
        objAPI := newObjectLayerWithoutSafeModeFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
        }

        return nil, BucketLifecycleNotFound{Bucket: bucketName}
    }

    return globalBucketMetadataSys.GetLifecycleConfig(bucketName)
}

// NewLifecycleSys - creates new lifecycle system.
func NewLifecycleSys() *LifecycleSys {
    return &LifecycleSys{}
}
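
With this change LifecycleSys becomes a thin shim over the bucket metadata subsystem. A typical lookup, assuming the usual global wiring in cmd (the bucket name is hypothetical):

    lc, err := globalLifecycleSys.Get("mybucket")
    if err != nil {
        // BucketLifecycleNotFound is returned both in gateway mode and when
        // no lifecycle document has been configured for the bucket.
    }
    _ = lc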
@@ -17,7 +17,10 @@
package cmd

import (
    "context"
    "fmt"
    "net/http"
    "strconv"
    "strings"

    "github.com/gorilla/mux"
@@ -49,13 +52,13 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
    return ErrNone
}

// ListBucketObjectVersions - GET Bucket Object versions
// ListObjectVersions - GET Bucket Object versions
// You can use the versions subresource to list metadata about all
// of the versions of objects in a bucket.
func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListBucketObjectVersions")
func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListObjectVersions")

    defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r))
    defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
@@ -74,8 +77,7 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit
    urlValues := r.URL.Query()

    // Extract all the listBucketVersions query params to their native values.
    // versionIDMarker is ignored here.
    prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues)
    prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues)
    if errCode != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
        return
@@ -87,40 +89,29 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit
        return
    }

    listObjects := objectAPI.ListObjects
    listObjectVersions := objectAPI.ListObjectVersions

    // Initiate a list objects operation based on the input params.
    // Initiate a list object versions operation based on the input params.
    // On success would return back ListObjectsInfo object to be
    // marshaled into S3 compatible XML header.
    listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys)
    listObjectVersionsInfo, err := listObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, delimiter, maxkeys)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    for i := range listObjectsInfo.Objects {
        var actualSize int64
        if listObjectsInfo.Objects[i].IsCompressed() {
            // Read the decompressed size from the meta.json.
            actualSize = listObjectsInfo.Objects[i].GetActualSize()
            if actualSize < 0 {
                writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize),
                    r.URL, guessIsBrowserReq(r))
                return
            }
            // Set the info.Size to the actualSize.
            listObjectsInfo.Objects[i].Size = actualSize
        } else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
            listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
            listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
            if err != nil {
                writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
                return
            }
    for i := range listObjectVersionsInfo.Objects {
        if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) {
            listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false)
        }
        listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize()
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }

    response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo)
    response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

    // Write success response.
    writeSuccessResponseXML(w, encodeResponse(response))
@@ -169,6 +160,13 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
        return
    }

    // Analyze continuation token and route the request accordingly
    var success bool
    token, success = proxyRequestByToken(ctx, w, r, token)
    if success {
        return
    }

    listObjectsV2 := objectAPI.ListObjectsV2

    // Initiate a list objects operation based on the input params.
@@ -181,28 +179,23 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
    }

    for i := range listObjectsV2Info.Objects {
        var actualSize int64
        if listObjectsV2Info.Objects[i].IsCompressed() {
            // Read the decompressed size from the meta.json.
            actualSize = listObjectsV2Info.Objects[i].GetActualSize()
            if actualSize < 0 {
                writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
                return
            }
            // Set the info.Size to the actualSize.
            listObjectsV2Info.Objects[i].Size = actualSize
        } else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
        if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
            listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
            listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
            if err != nil {
                writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
                return
            }
        }
        listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }

    response := generateListObjectsV2Response(bucket, prefix, token,
        listObjectsV2Info.NextContinuationToken, startAfter,
    // The next continuation token has id@node_index format to optimize paginated listing
    nextContinuationToken := listObjectsV2Info.NextContinuationToken
    if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
        nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
    }

    response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
        delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
        maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, true)

@@ -253,6 +246,13 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
        return
    }

    // Analyze continuation token and route the request accordingly
    var success bool
    token, success = proxyRequestByToken(ctx, w, r, token)
    if success {
        return
    }

    listObjectsV2 := objectAPI.ListObjectsV2

    // Initiate a list objects operation based on the input params.
@@ -265,28 +265,23 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
    }

    for i := range listObjectsV2Info.Objects {
        var actualSize int64
        if listObjectsV2Info.Objects[i].IsCompressed() {
            // Read the decompressed size from the meta.json.
            actualSize = listObjectsV2Info.Objects[i].GetActualSize()
            if actualSize < 0 {
                writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
                return
            }
            // Set the info.Size to the actualSize.
            listObjectsV2Info.Objects[i].Size = actualSize
        } else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
        if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
            listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
            listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
            if err != nil {
                writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
                return
            }
        }
        listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }

    response := generateListObjectsV2Response(bucket, prefix, token,
        listObjectsV2Info.NextContinuationToken, startAfter,
    // The next continuation token has id@node_index format to optimize paginated listing
    nextContinuationToken := listObjectsV2Info.NextContinuationToken
    if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
        nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
    }

    response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
        delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
        maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)

@@ -294,6 +289,60 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
    writeSuccessResponseXML(w, encodeResponse(response))
}

func getLocalNodeIndex() int {
    if len(globalProxyEndpoints) == 0 {
        return -1
    }
    for i, ep := range globalProxyEndpoints {
        if ep.IsLocal {
            return i
        }
    }
    return -1
}

func parseRequestToken(token string) (subToken string, nodeIndex int) {
    if token == "" {
        return token, -1
    }
    i := strings.Index(token, "@")
    if i < 0 {
        return token, -1
    }
    nodeIndex, err := strconv.Atoi(token[i+1:])
    if err != nil {
        return token, -1
    }
    subToken = token[:i]
    return subToken, nodeIndex
}
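
The continuation-token helpers round-trip as follows (key and node index are hypothetical values for illustration):

    token := fmt.Sprintf("%s@%d", "list-key", 2) // what the V2 handlers emit via getLocalNodeIndex
    subToken, nodeIndex := parseRequestToken(token)
    // subToken == "list-key", nodeIndex == 2

    subToken, nodeIndex = parseRequestToken("plain-token")
    // no "@" suffix, so the token passes through: subToken == "plain-token", nodeIndex == -1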

func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string) (string, bool) {
    subToken, nodeIndex := parseRequestToken(token)
    if nodeIndex > 0 {
        return subToken, proxyRequestByNodeIndex(ctx, w, r, nodeIndex)
    }
    return subToken, false
}

func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int) (success bool) {
    if len(globalProxyEndpoints) == 0 {
        return false
    }
    if index < 0 || index >= len(globalProxyEndpoints) {
        return false
    }
    ep := globalProxyEndpoints[index]
    if ep.IsLocal {
        return false
    }
    return proxyRequest(ctx, w, r, ep)
}

func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) (success bool) {
    return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(bucket, len(globalProxyEndpoints)))
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
@@ -332,6 +381,10 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
        return
    }

    if proxyRequestByBucket(ctx, w, r, bucket) {
        return
    }

    listObjects := objectAPI.ListObjects

    // Initiate a list objects operation based on the input params.
@@ -344,25 +397,16 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
    }

    for i := range listObjectsInfo.Objects {
        var actualSize int64
        if listObjectsInfo.Objects[i].IsCompressed() {
            // Read the decompressed size from the meta.json.
            actualSize = listObjectsInfo.Objects[i].GetActualSize()
            if actualSize < 0 {
                writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
                return
            }
            // Set the info.Size to the actualSize.
            listObjectsInfo.Objects[i].Size = actualSize
        } else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
        if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
            listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
            listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
            if err != nil {
                writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
                return
            }
        }
        listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }

    response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

    // Write success response.
412	cmd/bucket-metadata-sys.go	Normal file
@@ -0,0 +1,412 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "sync"

    "github.com/minio/minio-go/v7/pkg/tags"
    bucketsse "github.com/minio/minio/pkg/bucket/encryption"
    "github.com/minio/minio/pkg/bucket/lifecycle"
    objectlock "github.com/minio/minio/pkg/bucket/object/lock"
    "github.com/minio/minio/pkg/bucket/policy"
    "github.com/minio/minio/pkg/bucket/versioning"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/madmin"
    "github.com/minio/minio/pkg/sync/errgroup"
)

// BucketMetadataSys captures all bucket metadata for a given cluster.
type BucketMetadataSys struct {
    sync.RWMutex
    metadataMap map[string]BucketMetadata
}

// Remove bucket metadata from memory.
func (sys *BucketMetadataSys) Remove(bucket string) {
    if globalIsGateway {
        return
    }
    sys.Lock()
    delete(sys.metadataMap, bucket)
    sys.Unlock()
}

// Set - sets a new metadata in-memory.
// Only a shallow copy is saved and fields with references
// cannot be modified without causing a race condition,
// so they should be replaced atomically and not appended to, etc.
// Data is not persisted to disk.
func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
    if globalIsGateway {
        return
    }

    if bucket != minioMetaBucket {
        sys.Lock()
        sys.metadataMap[bucket] = meta
        sys.Unlock()
    }
}

// Update updates bucket metadata for the specified config file.
// The configData should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
    objAPI := newObjectLayerWithoutSafeModeFn()
    if objAPI == nil {
        return errServerNotInitialized
    }

    if globalIsGateway {
        // This code is needed only for gateway implementations.
        switch configFile {
        case bucketSSEConfig:
            if globalGatewayName == "nas" {
                meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
                if err != nil {
                    return err
                }
                meta.EncryptionConfigXML = configData
                return meta.Save(GlobalContext, objAPI)
            }
        case bucketLifecycleConfig:
            if globalGatewayName == "nas" {
                meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
                if err != nil {
                    return err
                }
                meta.LifecycleConfigXML = configData
                return meta.Save(GlobalContext, objAPI)
            }
        case bucketTaggingConfig:
            if globalGatewayName == "nas" {
                meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
                if err != nil {
                    return err
                }
                meta.TaggingConfigXML = configData
                return meta.Save(GlobalContext, objAPI)
            }
        case bucketNotificationConfig:
            if globalGatewayName == "nas" {
                meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
                if err != nil {
                    return err
                }
                meta.NotificationConfigXML = configData
                return meta.Save(GlobalContext, objAPI)
            }
        case bucketPolicyConfig:
            if configData == nil {
                return objAPI.DeleteBucketPolicy(GlobalContext, bucket)
            }
            config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
            if err != nil {
                return err
            }
            return objAPI.SetBucketPolicy(GlobalContext, bucket, config)
        }
        return NotImplemented{}
    }

    if bucket == minioMetaBucket {
        return errInvalidArgument
    }

    meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
    if err != nil {
        return err
    }

    switch configFile {
    case bucketPolicyConfig:
        meta.PolicyConfigJSON = configData
    case bucketNotificationConfig:
        meta.NotificationConfigXML = configData
    case bucketLifecycleConfig:
        meta.LifecycleConfigXML = configData
    case bucketSSEConfig:
        meta.EncryptionConfigXML = configData
    case bucketTaggingConfig:
        meta.TaggingConfigXML = configData
    case objectLockConfig:
        meta.ObjectLockConfigXML = configData
    case bucketVersioningConfig:
        meta.VersioningConfigXML = configData
    case bucketQuotaConfigFile:
        meta.QuotaConfigJSON = configData
    default:
        return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
    }

    if err := meta.Save(GlobalContext, objAPI); err != nil {
        return err
    }

    sys.Set(bucket, meta)
    globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

    return nil
}
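
Both lifecycle handlers shown earlier funnel through this method: PUT passes the marshaled XML, DELETE passes nil to clear the stored config. A sketch of both calls (the bucket name is hypothetical):

    // Persist a new lifecycle document.
    if err := globalBucketMetadataSys.Update("mybucket", bucketLifecycleConfig, configData); err != nil {
        // handle error
    }
    // A nil payload clears the stored configuration for that config file.
    if err := globalBucketMetadataSys.Update("mybucket", bucketLifecycleConfig, nil); err != nil {
        // handle error
    }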

// Get metadata for a bucket.
// If no metadata exists, errConfigNotFound is returned along with freshly initialized metadata.
// Only a shallow copy is returned, so referenced data should not be modified,
// but can be replaced atomically.
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
    if globalIsGateway || bucket == minioMetaBucket {
        return newBucketMetadata(bucket), errConfigNotFound
    }

    sys.RLock()
    defer sys.RUnlock()

    meta, ok := sys.metadataMap[bucket]
    if !ok {
        return newBucketMetadata(bucket), errConfigNotFound
    }

    return meta, nil
}

// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
    meta, err := sys.GetConfig(bucket)
    if err != nil {
        return nil, err
    }
    return meta.versioningConfig, nil
}

// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
    meta, err := sys.GetConfig(bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketTaggingNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.taggingConfig == nil {
        return nil, BucketTaggingNotFound{Bucket: bucket}
    }
    return meta.taggingConfig, nil
}

// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
    meta, err := sys.GetConfig(bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.objectLockConfig == nil {
        return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
    }
    return meta.objectLockConfig, nil
}

// GetLifecycleConfig returns configured lifecycle config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Lifecycle, error) {
    meta, err := sys.GetConfig(bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketLifecycleNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.lifecycleConfig == nil {
        return nil, BucketLifecycleNotFound{Bucket: bucket}
    }
    return meta.lifecycleConfig, nil
}

// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
    if globalIsGateway && globalGatewayName == "nas" {
        // Only needed in case of NAS gateway.
        objAPI := newObjectLayerWithoutSafeModeFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
        }
        meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
        if err != nil {
            return nil, err
        }
        return meta.notificationConfig, nil
    }

    meta, err := sys.GetConfig(bucket)
    if err != nil {
        return nil, err
    }
    return meta.notificationConfig, nil
}

// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
    meta, err := sys.GetConfig(bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketSSEConfigNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.sseConfig == nil {
        return nil, BucketSSEConfigNotFound{Bucket: bucket}
    }
    return meta.sseConfig, nil
}

// GetPolicyConfig returns configured bucket policy
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
    if globalIsGateway {
        objAPI := newObjectLayerWithoutSafeModeFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
        }
        return objAPI.GetBucketPolicy(GlobalContext, bucket)
    }

    meta, err := sys.GetConfig(bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketPolicyNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.policyConfig == nil {
        return nil, BucketPolicyNotFound{Bucket: bucket}
    }
    return meta.policyConfig, nil
}

// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota, error) {
    meta, err := sys.GetConfig(bucket)
    if err != nil {
        return nil, err
    }
    return meta.quotaConfig, nil
}

// GetConfig returns the current bucket metadata
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
    objAPI := newObjectLayerWithoutSafeModeFn()
    if objAPI == nil {
        return newBucketMetadata(bucket), errServerNotInitialized
    }

    if globalIsGateway {
        return newBucketMetadata(bucket), NotImplemented{}
    }

    if bucket == minioMetaBucket {
        return newBucketMetadata(bucket), errInvalidArgument
    }

    sys.RLock()
    meta, ok := sys.metadataMap[bucket]
    sys.RUnlock()
    if ok {
        return meta, nil
    }
    meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
    if err != nil {
        return meta, err
    }
    sys.Lock()
    sys.metadataMap[bucket] = meta
    sys.Unlock()
    return meta, nil
}

// Init - initializes bucket metadata system for all buckets.
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
    if objAPI == nil {
        return errServerNotInitialized
    }

    // In gateway mode, we don't need to load the policies
    // from the backend.
    if globalIsGateway {
        return nil
    }

    // Load PolicySys once during boot.
    return sys.load(ctx, buckets, objAPI)
}

// concurrentLoad loads bucket metadata concurrently to speed up startup.
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
    g := errgroup.WithNErrs(len(buckets))
    for index := range buckets {
        index := index
        g.Go(func() error {
            meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
            if err != nil {
                return err
            }
            sys.Lock()
            sys.metadataMap[buckets[index].Name] = meta
            sys.Unlock()
            return nil
        }, index)
    }
    for _, err := range g.Wait() {
        if err != nil {
            return err
        }
    }
    return nil
}

// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
    count := 100 // load 100 bucket metadata at a time.
    for {
        if len(buckets) < count {
            return sys.concurrentLoad(ctx, buckets, objAPI)
        }
        if err := sys.concurrentLoad(ctx, buckets[:count], objAPI); err != nil {
            return err
        }
        buckets = buckets[count:]
    }
}
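
As a worked example of the chunking above: with 250 buckets, load issues concurrent batches of 100, 100, and finally 50, returning on the first batch that reports an error. The slicing pattern in isolation, with strings standing in for BucketInfo:

    buckets := make([]string, 250)
    count := 100
    for len(buckets) >= count {
        _ = buckets[:count] // each chunk is loaded concurrently via errgroup
        buckets = buckets[count:]
    }
    _ = buckets // final partial chunk (50 here), handled by the len(buckets) < count branch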

// NewBucketMetadataSys - creates a new bucket metadata system.
func NewBucketMetadataSys() *BucketMetadataSys {
    return &BucketMetadataSys{
        metadataMap: make(map[string]BucketMetadata),
    }
}
337	cmd/bucket-metadata.go	Normal file
@@ -0,0 +1,337 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "bytes"
    "context"
    "encoding/binary"
    "encoding/xml"
    "errors"
    "fmt"
    "path"
    "time"

    "github.com/minio/minio-go/v7/pkg/tags"
    "github.com/minio/minio/cmd/logger"
    bucketsse "github.com/minio/minio/pkg/bucket/encryption"
    "github.com/minio/minio/pkg/bucket/lifecycle"
    objectlock "github.com/minio/minio/pkg/bucket/object/lock"
    "github.com/minio/minio/pkg/bucket/policy"
    "github.com/minio/minio/pkg/bucket/versioning"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/madmin"
)

const (
    legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
    legacyBucketObjectLockEnabledConfig     = `{"x-amz-bucket-object-lock-enabled":true}`

    bucketMetadataFile    = ".metadata.bin"
    bucketMetadataFormat  = 1
    bucketMetadataVersion = 1
)

var (
    enabledBucketObjectLockConfig = []byte(`<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled></ObjectLockConfiguration>`)
    enabledBucketVersioningConfig = []byte(`<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>Enabled</Status></VersioningConfiguration>`)
)

//go:generate msgp -file $GOFILE

// BucketMetadata contains bucket metadata.
// When adding/removing fields, regenerate the marshal code using the go generate above.
// Only changing meaning of fields requires a version bump.
// bucketMetadataFormat refers to the format.
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
type BucketMetadata struct {
    Name                  string
    Created               time.Time
    LockEnabled           bool // legacy not used anymore.
    PolicyConfigJSON      []byte
    NotificationConfigXML []byte
    LifecycleConfigXML    []byte
    ObjectLockConfigXML   []byte
    VersioningConfigXML   []byte
    EncryptionConfigXML   []byte
    TaggingConfigXML      []byte
    QuotaConfigJSON       []byte

    // Unexported fields. Must be updated atomically.
    policyConfig       *policy.Policy
    notificationConfig *event.Config
    lifecycleConfig    *lifecycle.Lifecycle
    objectLockConfig   *objectlock.Config
    versioningConfig   *versioning.Versioning
    sseConfig          *bucketsse.BucketSSEConfig
    taggingConfig      *tags.Tags
    quotaConfig        *madmin.BucketQuota
}

// newBucketMetadata creates BucketMetadata with the supplied name and Created set to the current time.
func newBucketMetadata(name string) BucketMetadata {
    return BucketMetadata{
        Name:    name,
        Created: UTCNow(),
        notificationConfig: &event.Config{
            XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
        },
        quotaConfig: &madmin.BucketQuota{},
        versioningConfig: &versioning.Versioning{
            XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
        },
    }
}

// Load - loads the metadata of the named bucket from the ObjectLayer API.
// If an error is returned the returned metadata will be default initialized.
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
    configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
    data, err := readConfig(ctx, api, configFile)
    if err != nil {
        return err
    }
    if len(data) <= 4 {
        return fmt.Errorf("loadBucketMetadata: no data")
    }
    // Read header
    switch binary.LittleEndian.Uint16(data[0:2]) {
    case bucketMetadataFormat:
    default:
        return fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
    }
    switch binary.LittleEndian.Uint16(data[2:4]) {
    case bucketMetadataVersion:
    default:
        return fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
    }

    // OK, parse data.
    _, err = b.UnmarshalMsg(data[4:])
    return err
}
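
Save (further below) writes the 4-byte header that Load checks here. Constructing a valid header by hand under the same constants:

    buf := make([]byte, 4)
    binary.LittleEndian.PutUint16(buf[0:2], bucketMetadataFormat)  // format == 1
    binary.LittleEndian.PutUint16(buf[2:4], bucketMetadataVersion) // version == 1
    // the msgp-encoded BucketMetadata payload follows at buf[4:]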

// loadBucketMetadata loads bucket metadata, migrating legacy configs if needed.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
    b := newBucketMetadata(bucket)
    err := b.Load(ctx, objectAPI, bucket)
    if err == nil {
        return b, b.convertLegacyConfigs(ctx, objectAPI)
    }

    if err != errConfigNotFound {
        return b, err
    }

    // Old bucket without bucket metadata. Hence we migrate existing settings.
    return b, b.convertLegacyConfigs(ctx, objectAPI)
}

// parseAllConfigs will parse all configs and populate the private fields.
// The first error encountered is returned.
func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLayer) (err error) {
    if len(b.PolicyConfigJSON) != 0 {
        b.policyConfig, err = policy.ParseConfig(bytes.NewReader(b.PolicyConfigJSON), b.Name)
        if err != nil {
            return err
        }
    } else {
        b.policyConfig = nil
    }

    if len(b.NotificationConfigXML) != 0 {
        if err = xml.Unmarshal(b.NotificationConfigXML, b.notificationConfig); err != nil {
            return err
        }
    }

    if len(b.LifecycleConfigXML) != 0 {
        b.lifecycleConfig, err = lifecycle.ParseLifecycleConfig(bytes.NewReader(b.LifecycleConfigXML))
        if err != nil {
            return err
        }
    } else {
        b.lifecycleConfig = nil
    }

    if len(b.EncryptionConfigXML) != 0 {
        b.sseConfig, err = bucketsse.ParseBucketSSEConfig(bytes.NewReader(b.EncryptionConfigXML))
        if err != nil {
            return err
        }
    } else {
        b.sseConfig = nil
    }

    if len(b.TaggingConfigXML) != 0 {
        b.taggingConfig, err = tags.ParseBucketXML(bytes.NewReader(b.TaggingConfigXML))
        if err != nil {
            return err
        }
    } else {
        b.taggingConfig = nil
    }

    if bytes.Equal(b.ObjectLockConfigXML, enabledBucketObjectLockConfig) {
        b.VersioningConfigXML = enabledBucketVersioningConfig
    }

    if len(b.ObjectLockConfigXML) != 0 {
        b.objectLockConfig, err = objectlock.ParseObjectLockConfig(bytes.NewReader(b.ObjectLockConfigXML))
        if err != nil {
            return err
        }
    } else {
        b.objectLockConfig = nil
    }

    if len(b.VersioningConfigXML) != 0 {
        b.versioningConfig, err = versioning.ParseConfig(bytes.NewReader(b.VersioningConfigXML))
        if err != nil {
            return err
        }
    }

    if len(b.QuotaConfigJSON) != 0 {
        b.quotaConfig, err = parseBucketQuota(b.Name, b.QuotaConfigJSON)
        if err != nil {
            return err
        }
    }

    return nil
}

func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI ObjectLayer) error {
    legacyConfigs := []string{
        legacyBucketObjectLockEnabledConfigFile,
        bucketPolicyConfig,
        bucketNotificationConfig,
        bucketLifecycleConfig,
        bucketQuotaConfigFile,
        bucketSSEConfig,
        bucketTaggingConfig,
        objectLockConfig,
    }

    configs := make(map[string][]byte)

    // Handle migration from lockEnabled to newer format.
    if b.LockEnabled {
        configs[objectLockConfig] = enabledBucketObjectLockConfig
        b.LockEnabled = false // legacy value, unset it
        // we are only interested in b.ObjectLockConfigXML or objectLockConfig value
    }

    for _, legacyFile := range legacyConfigs {
        configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)

        configData, err := readConfig(ctx, objectAPI, configFile)
        if err != nil {
            if errors.Is(err, errConfigNotFound) {
                // legacy file config not found, proceed to look for new metadata.
                continue
            }

            return err
        }
        configs[legacyFile] = configData
    }

    if len(configs) == 0 {
        // nothing to update, return right away.
        return b.parseAllConfigs(ctx, objectAPI)
    }

    for legacyFile, configData := range configs {
        switch legacyFile {
        case legacyBucketObjectLockEnabledConfigFile:
            if string(configData) == legacyBucketObjectLockEnabledConfig {
                b.ObjectLockConfigXML = enabledBucketObjectLockConfig
                b.VersioningConfigXML = enabledBucketVersioningConfig
                b.LockEnabled = false // legacy value, unset it
                // we are only interested in b.ObjectLockConfigXML
            }
        case bucketPolicyConfig:
            b.PolicyConfigJSON = configData
        case bucketNotificationConfig:
            b.NotificationConfigXML = configData
        case bucketLifecycleConfig:
            b.LifecycleConfigXML = configData
        case bucketSSEConfig:
            b.EncryptionConfigXML = configData
        case bucketTaggingConfig:
            b.TaggingConfigXML = configData
        case objectLockConfig:
            b.ObjectLockConfigXML = configData
            b.VersioningConfigXML = enabledBucketVersioningConfig
        case bucketQuotaConfigFile:
            b.QuotaConfigJSON = configData
        }
    }

    if err := b.Save(ctx, objectAPI); err != nil {
        return err
    }

    for legacyFile := range configs {
        configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
        if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
            logger.LogIf(ctx, err)
        }
    }

    return nil
}

// Save config to the supplied ObjectLayer API.
func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
    if err := b.parseAllConfigs(ctx, api); err != nil {
        return err
    }

    data := make([]byte, 4, b.Msgsize()+4)

    // Initialize the header.
    binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
    binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)

    // Marshal the bucket metadata
    data, err := b.MarshalMsg(data)
    if err != nil {
        return err
    }

    configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
    return saveConfig(ctx, api, configFile, data)
}

// deleteBucketMetadata deletes bucket metadata
// If config does not exist no error is returned.
func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
    metadataFiles := []string{
        dataUsageCacheName,
        bucketMetadataFile,
    }
    for _, metaFile := range metadataFiles {
        configFile := path.Join(bucketConfigPrefix, bucket, metaFile)
        if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
            return err
        }
    }
    return nil
}
Some files were not shown because too many files have changed in this diff.