Compare commits
461 commits: RELEASE.20... ... RELEASE.20...
| SHA1 |
| --- |
| 9cac385aec |
| 814ddc0923 |
| 247795dd36 |
| dfadf70a7f |
| d348ec0f6c |
| b730bd1396 |
| 56e0c6adf8 |
| f44a960dcd |
| 6c1bbf918d |
| 48e614b167 |
| fe8d33452b |
| c19ece6921 |
| 216fa57b88 |
| a9558ae248 |
| ee9077db7d |
| 9c85928740 |
| af0309371e |
| ead3c186a6 |
| 6ac48a65cb |
| 2ecf5ba1de |
| 94f1a1dea3 |
| c045ae15e7 |
| 1756b7c6ff |
| e25ace2151 |
| a8e5a86fa0 |
| f8edc233ab |
| 337c2a7cb4 |
| 2d735144b9 |
| 0113035237 |
| 52a1d248b2 |
| b5ed42c845 |
| d9e7cadacf |
| 36e88cbd50 |
| 6d76efb9bb |
| a1de9cec58 |
| 6885c72f32 |
| 0f1389e992 |
| 5bf3eeaa77 |
| 9dda1fd624 |
| 2dc46cb153 |
| 0674c0075e |
| 518ef670da |
| 0dd626ec67 |
| 53f4c0fdc0 |
| 7e3ea77fdf |
| 7290d23b26 |
| 24f20eb1bd |
| 4c9de098b0 |
| a2ccba69e5 |
| 8eb99d3a87 |
| 3773874cd3 |
| 6c62b1a2ea |
| b768645fde |
| 7b58dcb28c |
| fea4a1e68e |
| a9e83dd42c |
| 145f501a21 |
| 9b3b04ecec |
| 3e063cca5c |
| 27d716c663 |
| fbd15cb7b7 |
| f7c91eff54 |
| a6bdc086a2 |
| 1242dd951a |
| d1c8e9f31b |
| 83ccae6c8b |
| 086be07bf5 |
| 28f9c477a8 |
| 09571d03a5 |
| 71ce63f79c |
| 5205c9591f |
| 9a547dcbfb |
| 27632ca6ec |
| c7470e6e6e |
| d090a17ed0 |
| c2529260e7 |
| da87188ff8 |
| d099039f5d |
| 5dd9cf4398 |
| ab77b216d1 |
| b37a02cddf |
| c3c3e9087b |
| 7ad6bc955f |
| 7a5271ad96 |
| 1b122526aa |
| a3b266761e |
| 498389123e |
| bc61417284 |
| 97d952e61c |
| 073aac3d92 |
| eff4127efd |
| b1c0c32ba6 |
| f14bf25cb9 |
| f216670814 |
| 6ecc98fddb |
| 4843affd0e |
| 558785a4bb |
| 60d415bb8a |
| 45e22cf8aa |
| e4900b99d7 |
| 20766069a8 |
| e8160c9fae |
| 957ecb1b64 |
| ac5061df2c |
| cddb2714ef |
| d7d9cac20b |
| 6817c5ea58 |
| 4cd6ca02c7 |
| a5efcbab51 |
| 85be7b39ac |
| ebf3dda449 |
| 582953260b |
| 2d1ea86fc6 |
| 322385f1b6 |
| 1b38aed05f |
| a69c98e394 |
| 282c9f790a |
| 2eeb0e6a0b |
| 3ff5bf2369 |
| 69ee28a082 |
| d02deff3d7 |
| 3e78ea8acc |
| b54c0f0ef3 |
| c79358c67e |
| 75107d7698 |
| c4464e36c8 |
| d92db198d1 |
| 8bae956df6 |
| 7758524703 |
| c82fa2c829 |
| a51280fd20 |
| bd437c1c17 |
| 69fb68ef0b |
| 787dbaff36 |
| c50ae1fdbe |
| bde0f444db |
| 6a8298b137 |
| f19cbfad5c |
| 78f2183e70 |
| 5c11a46412 |
| 8a94aebdb8 |
| ec11e99667 |
| 37d066b563 |
| bfec5fe200 |
| 525287f4b6 |
| 9054ce73b2 |
| d079adc167 |
| a9d401ac10 |
| 1fa65c7f2f |
| cc9b63eb51 |
| 7e12eab3ad |
| fa685d7d9c |
| 4314ee1670 |
| 7d636a7c13 |
| bf9d51cf14 |
| 29e0727b58 |
| c434dff0a4 |
| b2a8cb4aba |
| b412a222ae |
| 79bcb705bf |
| 9f81d014f1 |
| 3184205519 |
| 7c919329e8 |
| db41953618 |
| cea078a593 |
| f44cfb2863 |
| 1b45be0d60 |
| 6bb693488c |
| e20e08d700 |
| ac07df2985 |
| 2054ca5c9a |
| e51e465543 |
| 2bbc6a83e8 |
| a78731a3ba |
| f4e779c964 |
| a973402821 |
| 44decbeae0 |
| 3ea1be3c52 |
| 2642e12d14 |
| e7276b7b9b |
| e375341c33 |
| 2c20716f37 |
| 928f5b0564 |
| 91f21ddc47 |
| 43a3778b45 |
| b9b1bfefe7 |
| 05cda35b14 |
| 2155e74951 |
| 4714958e99 |
| c6e62b9175 |
| ab66b23194 |
| 73f9d8a636 |
| 541a778d7b |
| d49f2ec19c |
| 8dd63a462f |
| 9902c9baaa |
| 7de29e6e6b |
| 336460f67e |
| 95e89f1712 |
| 886ae15464 |
| d8af244708 |
| c8243706b4 |
| 30707659b5 |
| 90c365a174 |
| 7b732b566f |
| ba52a925f9 |
| fdda5f98c6 |
| fa4d627b57 |
| 2c3e34f001 |
| 7f8f1ad4e3 |
| 6f992134a2 |
| 0c80bf45d0 |
| 2777956581 |
| b207520d98 |
| 2196fd9cd5 |
| ef6304c5c2 |
| 6b984410d5 |
| 813e0fc1a8 |
| 38cf263409 |
| 6f6a2214fc |
| 791821d590 |
| 9a951da881 |
| e7a0be5bd3 |
| ff932ca2a0 |
| 818d3bcaf5 |
| 45b1c66195 |
| da04cb91ce |
| cfc9cfd84a |
| ea18e51f4d |
| 3d3beb6a9d |
| 27b8f18cce |
| bf545dc320 |
| f001e99fcd |
| b4bfdc92cc |
| ae654831aa |
| 1ffa983a9d |
| ecf1566266 |
| c5b87f93dd |
| b1a2169dcc |
| d45a1808f2 |
| db2155551a |
| 09d35d3b4c |
| 5b9342d35c |
| 8d98662633 |
| 7fdeb44372 |
| 59dced8237 |
| 496f4a7dc7 |
| 8b880a246a |
| eeb5942b6b |
| 7ec904d67b |
| c9212819af |
| 10fd53d6bb |
| 3fea1d5e35 |
| 35ecc04223 |
| 3ca9f5ffa3 |
| 2e9fed1a14 |
| 6b92f3fd99 |
| 06e30b5aa1 |
| 603cf2a8bb |
| a54cdb9587 |
| ed4bd20a7c |
| cfd12914e1 |
| c55aeaf814 |
| fdf65aa9b9 |
| 69b2aacf5a |
| 0af62d35a0 |
| 7c32f3f554 |
| cec8cdb35e |
| 5ab9cc029d |
| 667f42515a |
| 3614cb7a8b |
| b809c84338 |
| 33edb072a3 |
| 6a00eb10bf |
| 792ee48d2c |
| 52873ac3a3 |
| 88ae0f1196 |
| 23a0415eb7 |
| 75a0661213 |
| a1c7c9ea73 |
| 7f19a9a617 |
| 2f2c7d91a8 |
| 9ad1c2d07d |
| 07a7f329e7 |
| c7ca791c58 |
| d9be8bc693 |
| 9fc7537f2a |
| f1b2462193 |
| 8fbf2b0b2a |
| c93157019f |
| e3b44c3829 |
| 978bd4e2c4 |
| bb942c7311 |
| 5d25b10f72 |
| eac02c04f7 |
| 1330e59307 |
| 2dd14c0b89 |
| 5b8975bf4b |
| 6f66f1a910 |
| deb3911f5e |
| 23a8411732 |
| ece0d4ac53 |
| 4c92bec619 |
| dcd63b4146 |
| 224b4f13b8 |
| 51a9d1bdb7 |
| b2db1e96e2 |
| ab7d3cd508 |
| 852fb320f7 |
| 8fb37a8417 |
| d4dcf1d722 |
| 02a59a04d1 |
| 16a6e68d7b |
| 1b427ddb69 |
| 02acff7fac |
| 712e82344c |
| 9f298d2311 |
| 716a52f261 |
| e4020fb41f |
| d1144c2c7e |
| 78125ee853 |
| 9ecd66007f |
| 64ec17b463 |
| 6b9805e891 |
| 013773065c |
| 7d6766adc6 |
| c56c2f5fd3 |
| 7e819d00ea |
| 33767266e7 |
| 63be4709b7 |
| 6b1f2fc133 |
| f749a9bf0e |
| 9b4d46a6ed |
| 502e652b7a |
| de924605a1 |
| 15e2ea2c96 |
| 07edb7c7f8 |
| d0cea7adea |
| 2165d45d3f |
| 6d5d77f62c |
| 49df290270 |
| b1bfd75fcf |
| e5951e30d0 |
| 45d725c0a3 |
| c2c5b09bb1 |
| 086fbb745e |
| 4f37c8ccf2 |
| 026265f8f7 |
| f91c072f61 |
| 52bdbcd046 |
| 301c50b721 |
| e9c111c8d0 |
| 278a165674 |
| a05af9bb98 |
| e934c3e2a2 |
| 2d295a31de |
| 9bbf5cb74f |
| 680e493065 |
| 1ea2449269 |
| 7ce63b3078 |
| 7432b5c9b2 |
| d76160c245 |
| 5d838edcef |
| c9116e6bd7 |
| d7dc9aaf52 |
| bfe8a9bccc |
| 9990464cd5 |
| 881e983ed9 |
| f6a7d4d29b |
| f98616dce7 |
| 5bd0e95eef |
| 0414f01b6e |
| 0cbebf0f57 |
| 4cb6ebcfa2 |
| 2232e095d5 |
| aae337f5b5 |
| a78e5d4763 |
| cf37c7997e |
| 1ffbb5c24c |
| b9c48e0ab0 |
| fe5d599802 |
| 55063906b5 |
| c7178d2066 |
| f14f60a487 |
| e2b3c083aa |
| 3011692d93 |
| 86252ec7e1 |
| ef1aa870c5 |
| c260182412 |
| 61c17c8933 |
| dd93eee1e3 |
| 7a400542ae |
| 88286cf8d0 |
| 8cb6184f1d |
| b849fd7a75 |
| 09ee145e9c |
| 04dfcbfdd7 |
| 23e46f9dba |
| fc5213258e |
| 005ebbb9b2 |
| 017067e11f |
| 2bb69033e5 |
| fca4ee84c9 |
| 60e60f68dd |
| ba758361b3 |
| c6b218e5df |
| c18fbdb29a |
| b1ad99edbf |
| 080e0c2323 |
| 935546d5ca |
| 64fde1ab95 |
| 169e8742fc |
| 069876e262 |
| 442e1698cb |
| d76518eeb9 |
| 0879a4f743 |
| 9be7066715 |
| d8660b30cc |
| 30922148fb |
| 37b32199e3 |
| d74818b227 |
| 627fdfeab7 |
| 3320878dfb |
| 3f20011862 |
| 5f34b5e6a5 |
| 9199033db7 |
| 2bf6cf0e15 |
| 686d4656de |
| 5aa5dcdc6d |
| 0a70bc24ac |
| 656146b699 |
| 5e40b9a563 |
| 89d1221217 |
| 4cd1bbb50a |
| c2cde6beb5 |
| abc1c1070a |
| fd56aa42a6 |
| 3d318bae76 |
| aa2e89bfe3 |
| 60813bef29 |
| b123be5612 |
| 933c60bc3a |
| 796cca4166 |
| fe379f9428 |
| ae0b165431 |
| 7a06e158f1 |
| 5eab3db344 |
| 9f44fcd540 |
| b9b2b37366 |
| 7f31d933a8 |
| 6695fd6a61 |
| 84e55e2e6f |
| b00cda8ad4 |
| d861edfc00 |
| dd311623df |
| cb935980a5 |
| 157721f694 |
| 97d799b9f0 |
| 0b7bd024fb |
| 3af70b36fd |
| 8eba97da74 |
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: MinIO Community Support
+    url: https://slack.min.io
+    about: Please ask and answer questions here.
+  - name: MinIO SUBNET Support
+    url: https://min.io/pricing
+    about: Join this for Enterprise Support.
.github/lock.yml (vendored, new file, 39 lines)
@@ -0,0 +1,39 @@
+# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
+
+# Number of days of inactivity before a closed issue or pull request is locked
+daysUntilLock: 365
+
+# Skip issues and pull requests created before a given timestamp. Timestamp must
+# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
+skipCreatedBefore: false
+
+# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
+exemptLabels: []
+
+# Label to add before locking, such as `outdated`. Set to `false` to disable
+lockLabel: false
+
+# Comment to post before locking. Set to `false` to disable
+lockComment: >-
+
+  This thread has been automatically locked since there has not been
+  any recent activity after it was closed. Please open a new issue for
+  related bugs.
+
+# Assign `resolved` as the reason for locking. Set to `false` to disable
+setLockReason: true
+
+# Limit to only `issues` or `pulls`
+only: issues
+
+# Optionally, specify configuration settings just for `issues` or `pulls`
+# issues:
+#   exemptLabels:
+#     - help-wanted
+#   lockLabel: outdated
+
+# pulls:
+#   daysUntilLock: 30
+
+# Repository to extend settings from
+# _extends: repo
.github/stale.yml (vendored, new file, 59 lines)
@@ -0,0 +1,59 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 90
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: 30
+
+# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+  - "security"
+  - "pending discussion"
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: false
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: >-
+  This issue has been automatically marked as stale because it has not had
+  recent activity. It will be closed after 21 days if no further activity
+  occurs. Thank you for your contributions.
+# Comment to post when removing the stale label.
+# unmarkComment: >
+#   Your comment here.
+
+# Comment to post when closing a stale Issue or Pull Request.
+# closeComment: >
+#   Your comment here.
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 1
+
+# Limit to only `issues` or `pulls`
+# only: issues
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+# pulls:
+#   daysUntilStale: 30
+#   markComment: >
+#     This pull request has been automatically marked as stale because it has not had
+#     recent activity. It will be closed if no further activity occurs. Thank you
+#     for your contributions.
+
+# issues:
+#   exemptLabels:
+#     - confirmed
.github/workflows/go.yml (vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
+name: Go
+
+on:
+  pull_request:
+    branches:
+    - master
+  push:
+    branches:
+    - master
+
+jobs:
+  build:
+    name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.13.x]
+        os: [ubuntu-latest, windows-latest]
+    steps:
+    - uses: actions/checkout@v2
+    - uses: actions/setup-node@v1
+      with:
+        node-version: '12'
+    - uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+    - name: Build on ${{ matrix.os }}
+      if: matrix.os == 'windows-latest'
+      env:
+        CGO_ENABLED: 0
+        GO111MODULE: on
+        SIMPLE_CI: 1
+      run: |
+        go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
+        go test -v --timeout 30m ./...
+    - name: Build on ${{ matrix.os }}
+      if: matrix.os == 'ubuntu-latest'
+      env:
+        CGO_ENABLED: 0
+        GO111MODULE: on
+        SIMPLE_CI: 1
+      run: |
+        sudo apt-get install devscripts shellcheck
+        make
+        diff -au <(gofmt -s -d cmd) <(printf "")
+        diff -au <(gofmt -s -d pkg) <(printf "")
+        make test-race
+        make crosscompile
+        make verify
+        make verify-healing
+        cd browser && npm install && npm run test && cd ..
+        bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
@@ -1,4 +0,0 @@
-{
-  "numFilesToCheck": 10,
-  "requiredOrgs": ["minio"]
-}
.travis.yml (deleted file, 59 lines)
@@ -1,59 +0,0 @@
-go_import_path: github.com/minio/minio
-
-language: go
-
-addons:
-  apt:
-    packages:
-    - shellcheck
-
-services:
-  - docker
-
-# this ensures PRs based on a local branch are not built twice
-# the downside is that a PR targeting a different branch is not built
-# but as a workaround you can add the branch to this list
-branches:
-  only:
-  - master
-
-matrix:
-  include:
-  - os: linux
-    dist: trusty
-    sudo: required
-    env:
-    - ARCH=x86_64
-    - CGO_ENABLED=0
-    - GO111MODULE=on
-    - SIMPLE_CI=1
-    go: 1.13.x
-    script:
-    - make
-    - diff -au <(gofmt -s -d cmd) <(printf "")
-    - diff -au <(gofmt -s -d pkg) <(printf "")
-    - for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
-    - make verifiers
-    - make crosscompile
-    - make verify
-    - cd browser && npm install && npm run test && cd ..
-    - bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
-
-  - os: windows
-    env:
-    - ARCH=x86_64
-    - CGO_ENABLED=0
-    - GO111MODULE=on
-    - SIMPLE_CI=1
-    go: 1.13.x
-    script:
-    - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
-    - for d in $(go list ./... | grep -v browser); do go test -v --timeout 20m "$d"; done
-
-before_script:
-  # Add an IPv6 config - see the corresponding Travis issue
-  # https://github.com/travis-ci/travis-ci/issues/8361
-  - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi
-
-before_install:
-  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
@@ -1,4 +1,4 @@
-# MinIO Contribution Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
+# MinIO Contribution Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
 
 ``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.
 
@@ -5,24 +5,25 @@ LABEL maintainer="MinIO Inc <dev@min.io>"
 ENV GOPATH /go
 ENV CGO_ENABLED 0
 ENV GO111MODULE on
 ENV GOPROXY https://proxy.golang.org
 
 RUN \
     apk add --no-cache git && \
     git clone https://github.com/minio/minio && cd minio && \
     go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
 
-FROM alpine:3.9
+FROM alpine:3.10
 
 ENV MINIO_UPDATE off
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
-    MINIO_KMS_MASTER_KEY_FILE=kms_master_key
+    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
 
 EXPOSE 9000
 
 COPY --from=0 /go/bin/minio /usr/bin/minio
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/
+COPY --from=0 /go/minio/CREDITS /third_party/
+COPY --from=0 /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
 
 RUN \
     apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
Dockerfile.arm.release (new file, 41 lines)
@@ -0,0 +1,41 @@
+FROM golang:1.13-alpine as builder
+
+WORKDIR /home
+
+ENV GOPATH /go
+ENV CGO_ENABLED 0
+ENV GO111MODULE on
+
+RUN \
+    apk add --no-cache git 'curl>7.61.0' && \
+    git clone https://github.com/minio/minio && \
+    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
+
+FROM arm32v7/alpine:3.10
+
+LABEL maintainer="MinIO Inc <dev@min.io>"
+
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/
+COPY CREDITS /third_party/
+COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
+
+ENV MINIO_UPDATE off
+ENV MINIO_ACCESS_KEY_FILE=access_key \
+    MINIO_SECRET_KEY_FILE=secret_key \
+    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
+
+RUN \
+    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
+    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
+    curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
+    chmod +x /usr/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh
+
+EXPOSE 9000
+
+ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
+
+VOLUME ["/data"]
+
+CMD ["minio"]
Dockerfile.arm64.release (new file, 41 lines)
@@ -0,0 +1,41 @@
+FROM golang:1.13-alpine as builder
+
+WORKDIR /home
+
+ENV GOPATH /go
+ENV CGO_ENABLED 0
+ENV GO111MODULE on
+
+RUN \
+    apk add --no-cache git 'curl>7.61.0' && \
+    git clone https://github.com/minio/minio && \
+    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
+
+FROM arm64v8/alpine:3.10
+
+LABEL maintainer="MinIO Inc <dev@min.io>"
+
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/
+COPY CREDITS /third_party/
+COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
+
+ENV MINIO_UPDATE off
+ENV MINIO_ACCESS_KEY_FILE=access_key \
+    MINIO_SECRET_KEY_FILE=secret_key \
+    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
+
+RUN \
+    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
+    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
+    curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
+    chmod +x /usr/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh
+
+EXPOSE 9000
+
+ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
+
+VOLUME ["/data"]
+
+CMD ["minio"]
@@ -1,14 +1,16 @@
-FROM alpine:3.9
+FROM alpine:3.10
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/
 COPY minio /usr/bin/
+COPY CREDITS /third_party/
 
 ENV MINIO_UPDATE off
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
-    MINIO_KMS_MASTER_KEY_FILE=kms_master_key
+    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
 
 RUN \
     apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
@@ -1,17 +1,11 @@
-FROM ubuntu:16.04
+FROM ubuntu:18.04
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV LANG C.UTF-8
 ENV GOROOT /usr/local/go
-ENV GOPATH /usr/local
+ENV GOPATH /usr/local/gopath
 ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
 ENV MINT_ROOT_DIR /mint
 
 COPY mint /mint
 
 RUN apt-get --yes update && apt-get --yes upgrade && \
@@ -8,16 +8,18 @@ RUN \
     apk add --no-cache git && \
     git clone https://github.com/minio/minio
 
-FROM alpine:3.9
+FROM alpine:3.10
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/
+COPY CREDITS /third_party/
 
 ENV MINIO_UPDATE off
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
-    MINIO_KMS_MASTER_KEY_FILE=kms_master_key
+    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
 
 RUN \
     apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
@@ -34,11 +34,13 @@ USER ci
 RUN make
 RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
 RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
-RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 20m "$d"; done
-RUN make verifiers
+RUN make test-race
 RUN make crosscompile
 RUN make verify
+
+## -- add healing tests
+RUN make verify-healing
 
 #-------------------------------------------------------------
 # Stage 2: Test Frontend
 #-------------------------------------------------------------

@@ -55,7 +57,7 @@ RUN yarn test
 #-------------------------------------------------------------
 # Stage 3: Run Gateway Tests
 #-------------------------------------------------------------
-FROM ubuntu:16.04
+FROM ubuntu:18.04
 
 COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
 COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh

@@ -64,7 +66,7 @@ COPY mint /mint
 ENV DEBIAN_FRONTEND noninteractive
 ENV LANG C.UTF-8
 ENV GOROOT /usr/local/go
-ENV GOPATH /usr/local
+ENV GOPATH /usr/local/gopath
 ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
 ENV SIMPLE_CI 1
 ENV MINT_ROOT_DIR /mint
Makefile (19 lines changed)
@@ -22,7 +22,7 @@ getdeps:
 ifeq ($(GOARCH),s390x)
 	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
 else
-	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
+	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2020.1.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
 endif
 	@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)

@@ -64,20 +64,26 @@ test: verifiers build
 	@echo "Running unit tests"
 	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
 
+test-race: verifiers build
+	@echo "Running unit tests under -race"
+	@(env bash $(PWD)/buildscripts/race.sh)
+
 # Verify minio binary
 verify:
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 coverage: build
 	@echo "Running all coverage for minio"
 	@(env bash $(PWD)/buildscripts/go-coverage.sh)
+# Verify healing of disks with minio binary
+verify-healing:
+	@echo "Verify healing build with race"
+	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@(env bash $(PWD)/buildscripts/verify-healing.sh)
 
 # Builds minio locally.
 build: checks
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
 
 docker: build
 	@docker build -t $(TAG) . -f Dockerfile.dev

@@ -95,3 +101,4 @@ clean:
 	@rm -rvf minio
 	@rm -rvf build
 	@rm -rvf release
+	@rm -rvf .verify*
NOTICE (4 lines changed)
@@ -1,4 +1,4 @@
-MinIO Cloud Storage, (C) 2014,2015 MinIO, Inc.
+MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
 
 This product includes software developed at MinIO, Inc.
 (https://min.io/).

@@ -6,4 +6,4 @@ This product includes software developed at MinIO, Inc.
 The MinIO project contains unmodified/modified subcomponents too with
 separate copyright notices and license terms. Your use of the source
 code for these subcomponents is subject to the terms and conditions
-of the following licenses.
+of Apache License Version 2.0
README.md (12 lines changed)
@@ -1,5 +1,5 @@
 # MinIO Quickstart Guide
-[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
+[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
 
 [](https://min.io)
 

@@ -21,7 +21,7 @@ docker run -p 9000:9000 minio/minio:edge server /data
 > NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
 
 ## macOS
-### Homebrew
+### Homebrew (recommended)
 Install minio packages using [Homebrew](http://brew.sh/)
 ```sh
 brew install minio/stable/minio

@@ -74,7 +74,7 @@ minio.exe server D:\Photos
 
 ## FreeBSD
 ### Port
-Install minio packages using [pkg](https://github.com/freebsd/pkg)
+Install minio packages using [pkg](https://github.com/freebsd/pkg), MinIO doesn't officially build FreeBSD binaries but is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
 
 ```sh
 pkg install minio

@@ -185,11 +185,5 @@ mc admin update <minio alias, e.g., myminio>
 ## Contribute to MinIO Project
 Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
 
-## Caveats
-MinIO in its default mode doesn't use MD5Sum checkums of incoming streams unless requested by the client in `Content-Md5` header for validation. This may lead to incompatibility with rare S3 clients like `s3ql` which unfortunately do not set `Content-Md5` but depend on hex MD5Sum for the stream to be calculated by the server. MinIO considers this as a bug in `s3ql` and should be fixed on the client side because MD5Sum is a poor way to checksum and validate the authenticity of the objects. Although MinIO provides a workaround until client applications are fixed use `--compat` option instead to start the server.
-```sh
-./minio --compat server /data
-```
-
 ## License
 [](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)
@@ -1,4 +1,4 @@
-# MinIO Quickstart Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
+# MinIO Quickstart Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
 
 MinIO is an object storage service released under the Apache License v2.0. It is compatible with the Amazon S3 cloud storage service interface and is well suited to storing large volumes of unstructured data such as images, videos, log files, backup data, and container/VM images; an object file can be of any size, from a few KB up to a maximum of 5 TB.
 
SECURITY.md (22 lines changed)
@@ -8,12 +8,12 @@ Whenever there is a security update you just need to upgrade to the latest versi
 ## Reporting a Vulnerability
 
 All security bugs in [minio/minio](https://github.com/minio/minio) (or other minio/* repositories)
-should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
-and you'll receive a more detailed response to your email within 72 hours indicating the next steps
-in handling your report.
+should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
+and you'll receive a more detailed response to your email within 72 hours indicating the next steps
+in handling your report.
 
-Please, provide a detailed explanation of the issue. In particular, outline the type of the security
-issue (DoS, authentication bypass, information disclouse, ...) and the assumptions you're making (e.g. do
+Please, provide a detailed explanation of the issue. In particular, outline the type of the security
+issue (DoS, authentication bypass, information disclose, ...) and the assumptions you're making (e.g. do
 you need access credentials for a successful exploit).
 
 If you have not received a reply to your email within 48 hours or you have not heard from the security team

@@ -28,14 +28,14 @@ MinIO uses the following disclosure process:
 
 1. Once the security report is received one member of the security team tries to verify and reproduce
    the issue and determines the impact it has.
-2. A member of the security team will respond and either confrim or reject the security report.
-   If the report is rejected the repsonse explains why.
+2. A member of the security team will respond and either confirm or reject the security report.
+   If the report is rejected the response explains why.
 3. Code is audited to find any potential similar problems.
-4. Fixes are prepared for the latest release.
+4. Fixes are prepared for the latest release.
 5. On the date that the fixes are applied a security advisory will be published on https://blog.min.io.
    Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing
    the security issue. By default MinIO will **not** publish this information to protect your privacy.
 
 This process can take some time, especially when coordination is required with maintainers of other projects.
-Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
-follow the process described above to ensure that disclosures are handled consistently.
+Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
+follow the process described above to ensure that disclosures are handled consistently.
@@ -88,11 +88,6 @@ export class ChangePasswordModal extends React.Component {
 
   canChangePassword() {
     const { serverInfo } = this.props
-    // Password change is not allowed in WORM mode
-    if (serverInfo.info.isWorm) {
-      return false
-    }
-
     // Password change is not allowed for temporary users(STS)
     if(serverInfo.userInfo.isTempUser) {
       return false
@@ -148,7 +148,8 @@ export class Login extends React.Component {
               <OpenIDLoginButton
                 className="btn openid-btn"
                 clientId={this.state.clientId}
-                authorizationEndpoint={this.state.discoveryDoc.authorization_endpoint}
+                authEp={this.state.discoveryDoc.authorization_endpoint}
+                authScopes={this.state.discoveryDoc.scopes_supported}
               >
                 Log in with OpenID
               </OpenIDLoginButton>
@@ -66,6 +66,7 @@ export class OpenIDLogin extends React.Component {
 
       const authURL = buildOpenIDAuthURL(
         this.state.discoveryDoc.authorization_endpoint,
+        this.state.discoveryDoc.scopes_supported,
         redirectURI,
         this.state.clientID,
         nonce
@@ -27,7 +27,7 @@ export class OpenIDLoginButton extends React.Component {
 
   handleClick(event) {
     event.stopPropagation()
-    const { authorizationEndpoint, clientId } = this.props
+    const { authEp, authScopes, clientId } = this.props
 
     let redirectURI = window.location.href.split("#")[0]
     if (redirectURI.endsWith('/')) {

@@ -40,7 +40,7 @@ export class OpenIDLoginButton extends React.Component {
     const nonce = getRandomString(16)
     storage.setItem(OPEN_ID_NONCE_KEY, nonce)
 
-    const authURL = buildOpenIDAuthURL(authorizationEndpoint, redirectURI, clientId, nonce)
+    const authURL = buildOpenIDAuthURL(authEp, authScopes, redirectURI, clientId, nonce)
     window.location = authURL
   }
 
@@ -64,17 +64,6 @@ describe("ChangePasswordModal", () => {
     shallow(<ChangePasswordModal serverInfo={serverInfo} />)
   })
 
-  it("should not allow changing password when isWorm is true", () => {
-    const newServerInfo = { ...serverInfo, info: { isWorm: true } }
-    const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
-    expect(
-      wrapper
-        .find("ModalBody")
-        .childAt(0)
-        .text()
-    ).toBe("Credentials of this user cannot be updated through MinIO Browser.")
-  })
-
   it("should not allow changing password when not IAM user", () => {
     const newServerInfo = {
       ...serverInfo,
browser/app/js/browser/selectors.js (new file, 24 lines)
@@ -0,0 +1,24 @@
+/*
+ * MinIO Cloud Storage (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { createSelector } from "reselect"
+
+export const getServerInfo = state => state.browser.serverInfo
+
+export const hasServerPublicDomain = createSelector(
+  getServerInfo,
+  serverInfo => Boolean(serverInfo.info && serverInfo.info.domains && serverInfo.info.domains.length),
+)
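The new selector reads `state.browser.serverInfo` and reports whether the server advertises a public domain. A minimal usage sketch (illustrative only; the state shapes mirror the mocked stores used in the action tests further down):

```js
// Illustrative sketch, not part of the diff.
import { hasServerPublicDomain } from "./browser/selectors"

const withDomain = { browser: { serverInfo: { info: { domains: ["public.com"] } } } }
const withoutDomain = { browser: { serverInfo: {} } }

hasServerPublicDomain(withDomain)    // => true
hasServerPublicDomain(withoutDomain) // => false (info is undefined, so Boolean() yields false)
```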
@@ -16,14 +16,13 @@
 
 export const OPEN_ID_NONCE_KEY = 'openIDKey'
 
-export const buildOpenIDAuthURL = (authorizationEndpoint, redirectURI, clientID, nonce) => {
+export const buildOpenIDAuthURL = (authEp, authScopes, redirectURI, clientID, nonce) => {
   const params = new URLSearchParams()
   params.set("response_type", "id_token")
-  params.set("scope", "openid")
+  params.set("scope", authScopes.join(" "))
   params.set("client_id", clientID)
   params.set("redirect_uri", redirectURI)
   params.set("nonce", nonce)
 
-  return `${authorizationEndpoint}?${params.toString()}`
+  return `${authEp}?${params.toString()}`
 }
 
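For illustration, here is what the reworked helper produces for made-up discovery-document values (every input below is hypothetical, not from the diff):

```js
// URLSearchParams percent-encodes the redirect URI and joins scopes with "+".
const url = buildOpenIDAuthURL(
  "https://idp.example.com/authorize",  // authEp
  ["openid", "profile"],                // authScopes
  "https://minio.example.com/minio",    // redirectURI
  "client-123",                         // clientID
  "n0nc3",                              // nonce
)
// => "https://idp.example.com/authorize?response_type=id_token&scope=openid+profile
//     &client_id=client-123&redirect_uri=https%3A%2F%2Fminio.example.com%2Fminio&nonce=n0nc3"
```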
@@ -14,30 +14,67 @@
  * limitations under the License.
  */
 
-import mimedb from 'mime-types'
+import mimedb from "mime-types"
 
 const isFolder = (name, contentType) => {
-  if (name.endsWith('/')) return true
+  if (name.endsWith("/")) return true
   return false
 }
 
 const isPdf = (name, contentType) => {
-  if (contentType === 'application/pdf') return true
+  if (contentType === "application/pdf") return true
   return false
 }
+
+const isImage = (name, contentType) => {
+  if (
+    contentType === "image/jpeg" ||
+    contentType === "image/gif" ||
+    contentType === "image/x-icon" ||
+    contentType === "image/png" ||
+    contentType === "image/svg+xml" ||
+    contentType === "image/tiff" ||
+    contentType === "image/webp"
+  )
+    return true
+  return false
+}
 
 const isZip = (name, contentType) => {
-  if (!contentType || !contentType.includes('/')) return false
-  if (contentType.split('/')[1].includes('zip')) return true
+  if (!contentType || !contentType.includes("/")) return false
+  if (contentType.split("/")[1].includes("zip")) return true
   return false
 }
 
 const isCode = (name, contentType) => {
-  const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs',
-                   'php', 'css', 'less', 'scss', 'coffee', 'net', 'html',
-                   'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp',
-                   'asp', 'aspx']
-  const ext = name.split('.').reverse()[0]
+  const codeExt = [
+    "c",
+    "cpp",
+    "go",
+    "py",
+    "java",
+    "rb",
+    "js",
+    "pl",
+    "fs",
+    "php",
+    "css",
+    "less",
+    "scss",
+    "coffee",
+    "net",
+    "html",
+    "rs",
+    "exs",
+    "scala",
+    "hs",
+    "clj",
+    "el",
+    "scm",
+    "lisp",
+    "asp",
+    "aspx",
+  ]
+  const ext = name.split(".").reverse()[0]
   for (var i in codeExt) {
     if (ext === codeExt[i]) return true
   }

@@ -45,9 +82,9 @@ const isCode = (name, contentType) => {
 }
 
 const isExcel = (name, contentType) => {
-  if (!contentType || !contentType.includes('/')) return false
-  const types = ['excel', 'spreadsheet']
-  const subType = contentType.split('/')[1]
+  if (!contentType || !contentType.includes("/")) return false
+  const types = ["excel", "spreadsheet"]
+  const subType = contentType.split("/")[1]
   for (var i in types) {
     if (subType.includes(types[i])) return true
   }

@@ -55,9 +92,9 @@ const isExcel = (name, contentType) => {
 }
 
 const isDoc = (name, contentType) => {
-  if (!contentType || !contentType.includes('/')) return false
-  const types = ['word', '.document']
-  const subType = contentType.split('/')[1]
+  if (!contentType || !contentType.includes("/")) return false
+  const types = ["word", ".document"]
+  const subType = contentType.split("/")[1]
   for (var i in types) {
     if (subType.includes(types[i])) return true
   }

@@ -65,9 +102,9 @@ const isDoc = (name, contentType) => {
 }
 
 const isPresentation = (name, contentType) => {
-  if (!contentType || !contentType.includes('/')) return false
-  var types = ['powerpoint', 'presentation']
-  const subType = contentType.split('/')[1]
+  if (!contentType || !contentType.includes("/")) return false
+  var types = ["powerpoint", "presentation"]
+  const subType = contentType.split("/")[1]
   for (var i in types) {
     if (subType.includes(types[i])) return true
   }

@@ -76,31 +113,32 @@ const isPresentation = (name, contentType) => {
 
 const typeToIcon = (type) => {
   return (name, contentType) => {
-    if (!contentType || !contentType.includes('/')) return false
-    if (contentType.split('/')[0] === type) return true
+    if (!contentType || !contentType.includes("/")) return false
+    if (contentType.split("/")[0] === type) return true
     return false
   }
 }
 
 export const getDataType = (name, contentType) => {
   if (contentType === "") {
-    contentType = mimedb.lookup(name) || 'application/octet-stream'
+    contentType = mimedb.lookup(name) || "application/octet-stream"
   }
   const check = [
-    ['folder', isFolder],
-    ['code', isCode],
-    ['audio', typeToIcon('audio')],
-    ['image', typeToIcon('image')],
-    ['video', typeToIcon('video')],
-    ['text', typeToIcon('text')],
-    ['pdf', isPdf],
-    ['zip', isZip],
-    ['excel', isExcel],
-    ['doc', isDoc],
-    ['presentation', isPresentation]
+    ["folder", isFolder],
+    ["code", isCode],
+    ["audio", typeToIcon("audio")],
+    ["image", typeToIcon("image")],
+    ["video", typeToIcon("video")],
+    ["text", typeToIcon("text")],
+    ["pdf", isPdf],
+    ["image", isImage],
+    ["zip", isZip],
+    ["excel", isExcel],
+    ["doc", isDoc],
+    ["presentation", isPresentation],
   ]
   for (var i in check) {
     if (check[i][1](name, contentType)) return check[i][0]
  }
-  return 'other'
+  return "other"
 }
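A few illustrative calls (not part of the diff) showing how the rewritten `getDataType` resolves a type; the added `isImage` entry catches explicit image content types such as `image/webp` alongside the `typeToIcon("image")` prefix match:

```js
getDataType("photo.png", "image/png") // "image" (typeToIcon("image") prefix match)
getDataType("main.go", "")            // "code"  ("go" is in codeExt, checked before MIME prefixes)
getDataType("notes.txt", "")          // "text"  (mimedb.lookup resolves "text/plain")
getDataType("blob.xyz", "")           // "other" (falls back to application/octet-stream)
```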
@@ -19,18 +19,22 @@ import { connect } from "react-redux"
 import { Dropdown } from "react-bootstrap"
 import ShareObjectModal from "./ShareObjectModal"
 import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
+import PreviewObjectModal from "./PreviewObjectModal"
 
 import * as objectsActions from "./actions"
+import { getDataType } from "../mime.js"
 import {
   SHARE_OBJECT_EXPIRY_DAYS,
   SHARE_OBJECT_EXPIRY_HOURS,
-  SHARE_OBJECT_EXPIRY_MINUTES
+  SHARE_OBJECT_EXPIRY_MINUTES,
 } from "../constants"
 
 export class ObjectActions extends React.Component {
   constructor(props) {
     super(props)
     this.state = {
-      showDeleteConfirmation: false
+      showDeleteConfirmation: false,
+      showPreview: false,
     }
   }
   shareObject(e) {

@@ -53,7 +57,20 @@ export class ObjectActions extends React.Component {
   }
   hideDeleteConfirmModal() {
     this.setState({
-      showDeleteConfirmation: false
+      showDeleteConfirmation: false,
     })
   }
+  getObjectURL(objectname, callback) {
+    const { getObjectURL } = this.props
+    getObjectURL(objectname, callback)
+  }
+  showPreviewModal(e) {
+    e.preventDefault()
+    this.setState({ showPreview: true })
+  }
+  hidePreviewModal() {
+    this.setState({
+      showPreview: false,
+    })
+  }
   render() {

@@ -69,6 +86,15 @@ export class ObjectActions extends React.Component {
         >
           <i className="fas fa-share-alt" />
         </a>
+        {getDataType(object.name, object.contentType) == "image" && (
+          <a
+            href=""
+            className="fiad-action"
+            onClick={this.showPreviewModal.bind(this)}
+          >
+            <i className="far fa-file-image" />
+          </a>
+        )}
         <a
           href=""
           className="fiad-action"

@@ -77,14 +103,22 @@ export class ObjectActions extends React.Component {
           <i className="fas fa-trash-alt" />
         </a>
       </Dropdown.Menu>
-      {(showShareObjectModal && shareObjectName === object.name) &&
-        <ShareObjectModal object={object} />}
+      {showShareObjectModal && shareObjectName === object.name && (
+        <ShareObjectModal object={object} />
+      )}
       {this.state.showDeleteConfirmation && (
         <DeleteObjectConfirmModal
           deleteObject={this.deleteObject.bind(this)}
          hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
         />
       )}
+      {this.state.showPreview && (
+        <PreviewObjectModal
+          object={object}
+          hidePreviewModal={this.hidePreviewModal.bind(this)}
+          getObjectURL={this.getObjectURL.bind(this)}
+        />
+      )}
     </Dropdown>
   )
 }

@@ -94,15 +128,17 @@ const mapStateToProps = (state, ownProps) => {
   return {
     object: ownProps.object,
     showShareObjectModal: state.objects.shareObject.show,
-    shareObjectName: state.objects.shareObject.object
+    shareObjectName: state.objects.shareObject.object,
   }
 }
 
-const mapDispatchToProps = dispatch => {
+const mapDispatchToProps = (dispatch) => {
   return {
     shareObject: (object, days, hours, minutes) =>
       dispatch(objectsActions.shareObject(object, days, hours, minutes)),
-    deleteObject: object => dispatch(objectsActions.deleteObject(object))
+    deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
+    getObjectURL: (object, callback) =>
+      dispatch(objectsActions.getObjectURL(object, callback)),
   }
 }
browser/app/js/objects/PreviewObjectModal.js (new file, 68 lines)
@@ -0,0 +1,68 @@
+/*
+ * MinIO Cloud Storage (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { Modal, ModalHeader, ModalBody } from "react-bootstrap"
+
+class PreviewObjectModal extends React.Component {
+  constructor(props) {
+    super(props)
+    this.state = {
+      url: "",
+    }
+  }
+
+  componentDidMount() {
+    this.props.getObjectURL(this.props.object.name, (url) => {
+      this.setState({
+        url: url,
+      })
+    })
+  }
+
+  render() {
+    const { hidePreviewModal } = this.props
+    return (
+      <Modal
+        show={true}
+        animation={false}
+        onHide={hidePreviewModal}
+        bsSize="large"
+      >
+        <ModalHeader>Preview</ModalHeader>
+        <ModalBody>
+          <div className="input-group">
+            {this.state.url && (
+              <img
+                alt="Image broken"
+                src={this.state.url}
+                style={{ display: "block", width: "100%" }}
+              />
+            )}
+          </div>
+        </ModalBody>
+        <div className="modal-footer">
+          {
+            <button className="btn btn-link" onClick={hidePreviewModal}>
+              Cancel
+            </button>
+          }
+        </div>
+      </Modal>
+    )
+  }
+}
+export default PreviewObjectModal
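The real wiring for this modal appears in the ObjectActions diff above; as a stripped-down sketch (the stub `getObjectURL` below is hypothetical, standing in for the dispatched action that yields a presigned URL):

```js
// Hypothetical usage sketch of PreviewObjectModal, not part of the diff.
const getObjectURL = (name, callback) =>
  callback(`https://minio.example.com/bucket/${name}`) // stand-in for a presigned URL

const preview = (
  <PreviewObjectModal
    object={{ name: "photo.jpg", contentType: "image/jpeg" }}
    getObjectURL={getObjectURL}
    hidePreviewModal={() => console.log("preview closed")}
  />
)
```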
@@ -77,7 +77,8 @@ export class ShareObjectModal extends React.Component {
     hideShareObject()
   }
   render() {
-    const { shareObjectDetails, shareObject, hideShareObject } = this.props
+    const { shareObjectDetails, hideShareObject } = this.props
+    const url = `${window.location.protocol}//${shareObjectDetails.url}`
     return (
       <Modal
         show={true}

@@ -93,11 +94,12 @@ export class ShareObjectModal extends React.Component {
             type="text"
             ref={node => (this.copyTextInput = node)}
             readOnly="readOnly"
-            value={window.location.protocol + "//" + shareObjectDetails.url}
+            value={url}
             onClick={() => this.copyTextInput.select()}
           />
         </div>
-        <div
+        {shareObjectDetails.showExpiryDate && (
+          <div
             className="input-group"
             style={{ display: web.LoggedIn() ? "block" : "none" }}
           >

@@ -174,10 +176,11 @@ export class ShareObjectModal extends React.Component {
               </div>
             </div>
           </div>
+        )}
       </ModalBody>
       <div className="modal-footer">
         <CopyToClipboard
-          text={window.location.protocol + "//" + shareObjectDetails.url}
+          text={url}
           onCopy={this.onUrlCopied.bind(this)}
         >
           <button className="btn btn-success">Copy Link</button>
@@ -66,6 +66,49 @@ describe("ObjectActions", () => {
     expect(deleteObject).toHaveBeenCalledWith("obj1")
   })
 
+  it("should show PreviewObjectModal when preview action is clicked", () => {
+    const wrapper = shallow(
+      <ObjectActions
+        object={{ name: "obj1", contentType: "image/jpeg"}}
+        currentPrefix={"pre1/"} />
+    )
+    wrapper
+      .find("a")
+      .at(1)
+      .simulate("click", { preventDefault: jest.fn() })
+    expect(wrapper.state("showPreview")).toBeTruthy()
+    expect(wrapper.find("PreviewObjectModal").length).toBe(1)
+  })
+
+  it("should hide PreviewObjectModal when cancel button is clicked", () => {
+    const wrapper = shallow(
+      <ObjectActions
+        object={{ name: "obj1" , contentType: "image/jpeg"}}
+        currentPrefix={"pre1/"} />
+    )
+    wrapper
+      .find("a")
+      .at(1)
+      .simulate("click", { preventDefault: jest.fn() })
+    wrapper.find("PreviewObjectModal").prop("hidePreviewModal")()
+    wrapper.update()
+    expect(wrapper.state("showPreview")).toBeFalsy()
+    expect(wrapper.find("PreviewObjectModal").length).toBe(0)
+  })
+
+  it("should not show PreviewObjectModal when preview action is clicked if object is not an image", () => {
+    const wrapper = shallow(
+      <ObjectActions
+        object={{ name: "obj1"}}
+        currentPrefix={"pre1/"} />
+    )
+    expect(wrapper
+      .find("a")
+      .length).toBe(2) // find only the other 2
+  })
+
   it("should call shareObject with object and expiry", () => {
     const shareObject = jest.fn()
     const wrapper = shallow(
@@ -34,7 +34,7 @@ describe("ShareObjectModal", () => {
     shallow(
       <ShareObjectModal
         object={{ name: "obj1" }}
-        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
+        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
       />
     )
   })

@@ -44,7 +44,7 @@ describe("ShareObjectModal", () => {
     const wrapper = shallow(
       <ShareObjectModal
         object={{ name: "obj1" }}
-        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
+        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
         hideShareObject={hideShareObject}
       />
     )

@@ -59,7 +59,7 @@ describe("ShareObjectModal", () => {
     const wrapper = shallow(
       <ShareObjectModal
         object={{ name: "obj1" }}
-        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
+        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
       />
     )
     expect(

@@ -76,7 +76,7 @@ describe("ShareObjectModal", () => {
     const wrapper = shallow(
       <ShareObjectModal
         object={{ name: "obj1" }}
-        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
+        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
         hideShareObject={hideShareObject}
         showCopyAlert={showCopyAlert}
       />
     )

@@ -89,8 +89,15 @@ describe("ShareObjectModal", () => {
   describe("Update expiry values", () => {
     const props = {
       object: { name: "obj1" },
-      shareObjectDetails: { show: true, object: "obj1", url: "test" }
+      shareObjectDetails: { show: true, object: "obj1", url: "test", showExpiryDate: true }
     }
 
+    it("should not show expiry values if shared with public link", () => {
+      const shareObjectDetails = { show: true, object: "obj1", url: "test", showExpiryDate: false }
+      const wrapper = shallow(<ShareObjectModal {...props} shareObjectDetails={shareObjectDetails} />)
+      expect(wrapper.find('.set-expire').exists()).toEqual(false)
+    })
+
     it("should have default expiry values", () => {
       const wrapper = shallow(<ShareObjectModal {...props} />)
       expect(wrapper.state("expiry")).toEqual({
@@ -34,6 +34,7 @@ jest.mock("../../web", () => ({
     .mockReturnValueOnce(false)
     .mockReturnValueOnce(true)
     .mockReturnValueOnce(true)
+    .mockReturnValueOnce(true)
     .mockReturnValueOnce(false),
   ListObjects: jest.fn(({ bucketName }) => {
     if (bucketName === "test-deny") {

@@ -69,7 +70,14 @@ jest.mock("../../web", () => ({
     })
     .mockImplementationOnce(() => {
       return Promise.resolve({ token: "test" })
-    })
+    }),
+  GetBucketPolicy: jest.fn(({ bucketName, prefix }) => {
+    if (!bucketName) {
+      return Promise.reject({ message: "Invalid bucket" })
+    }
+    if (bucketName === 'test-public') return Promise.resolve({ policy: 'readonly' })
+    return Promise.resolve({})
+  })
 }))
 
 const middlewares = [thunk]
@@ -295,7 +303,8 @@ describe("Objects actions", () => {
         type: "objects/SET_SHARE_OBJECT",
         show: true,
         object: "b.txt",
-        url: "test"
+        url: "test",
+        showExpiryDate: true
       }
     ]
     store.dispatch(actionsObjects.showShareObject("b.txt", "test"))

@@ -321,14 +330,16 @@ describe("Objects actions", () => {
   it("creates objects/SET_SHARE_OBJECT when object is shared", () => {
     const store = mockStore({
       buckets: { currentBucket: "bk1" },
-      objects: { currentPrefix: "pre1/" }
+      objects: { currentPrefix: "pre1/" },
+      browser: { serverInfo: {} },
     })
     const expectedActions = [
       {
         type: "objects/SET_SHARE_OBJECT",
         show: true,
         object: "a.txt",
-        url: "https://test.com/bk1/pre1/b.txt"
+        url: "https://test.com/bk1/pre1/b.txt",
+        showExpiryDate: true
       },
       {
         type: "alert/SET",

@@ -347,10 +358,42 @@ describe("Objects actions", () => {
     })
   })
 
+  it("creates objects/SET_SHARE_OBJECT when object is shared with public link", () => {
+    const store = mockStore({
+      buckets: { currentBucket: "test-public" },
+      objects: { currentPrefix: "pre1/" },
+      browser: { serverInfo: { info: { domains: ['public.com'] }} },
+    })
+    const expectedActions = [
+      {
+        type: "objects/SET_SHARE_OBJECT",
+        show: true,
+        object: "a.txt",
+        url: "public.com/test-public/pre1/a.txt",
+        showExpiryDate: false
+      },
+      {
+        type: "alert/SET",
+        alert: {
+          type: "success",
+          message: "Object shared.",
+          id: alertActions.alertId
+        }
+      }
+    ]
+    return store
+      .dispatch(actionsObjects.shareObject("a.txt", 1, 0, 0))
+      .then(() => {
+        const actions = store.getActions()
+        expect(actions).toEqual(expectedActions)
+      })
+  })
+
   it("creates alert/SET when shareObject is failed", () => {
     const store = mockStore({
       buckets: { currentBucket: "" },
-      objects: { currentPrefix: "pre1/" }
+      objects: { currentPrefix: "pre1/" },
+      browser: { serverInfo: {} },
     })
     const expectedActions = [
       {
@@ -19,20 +19,20 @@ import history from "../history"
 import {
   sortObjectsByName,
   sortObjectsBySize,
-  sortObjectsByDate
+  sortObjectsByDate,
 } from "../utils"
 import { getCurrentBucket } from "../buckets/selectors"
 import { getCurrentPrefix, getCheckedList } from "./selectors"
 import * as alertActions from "../alert/actions"
 import * as bucketActions from "../buckets/actions"
 import {
   minioBrowserPrefix,
   SORT_BY_NAME,
   SORT_BY_SIZE,
   SORT_BY_LAST_MODIFIED,
   SORT_ORDER_ASC,
-  SORT_ORDER_DESC
+  SORT_ORDER_DESC,
 } from "../constants"
+import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

 export const SET_LIST = "objects/SET_LIST"
 export const RESET_LIST = "objects/RESET_LIST"
@@ -48,35 +48,35 @@ export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
 export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
 export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"

-export const setList = objects => ({
+export const setList = (objects) => ({
   type: SET_LIST,
-  objects
+  objects,
 })

 export const resetList = () => ({
-  type: RESET_LIST
+  type: RESET_LIST,
 })

-export const setListLoading = listLoading => ({
+export const setListLoading = (listLoading) => ({
   type: SET_LIST_LOADING,
-  listLoading
+  listLoading,
 })

 export const fetchObjects = () => {
-  return function(dispatch, getState) {
+  return function (dispatch, getState) {
     dispatch(resetList())
     const {
       buckets: { currentBucket },
-      objects: { currentPrefix }
+      objects: { currentPrefix },
     } = getState()
     if (currentBucket) {
       dispatch(setListLoading(true))
       return web
         .ListObjects({
           bucketName: currentBucket,
-          prefix: currentPrefix
+          prefix: currentPrefix,
         })
-        .then(res => {
+        .then((res) => {
           // we need to check if the bucket name and prefix are the same as
           // when the request was made before updating the displayed objects
           if (
@@ -85,10 +85,10 @@ export const fetchObjects = () => {
           ) {
             let objects = []
             if (res.objects) {
-              objects = res.objects.map(object => {
+              objects = res.objects.map((object) => {
                 return {
                   ...object,
-                  name: object.name.replace(currentPrefix, "")
+                  name: object.name.replace(currentPrefix, ""),
                 }
               })
             }
@@ -104,13 +104,13 @@ export const fetchObjects = () => {
             dispatch(setListLoading(false))
           }
         })
-        .catch(err => {
+        .catch((err) => {
           if (web.LoggedIn()) {
             dispatch(
               alertActions.set({
                 type: "danger",
                 message: err.message,
-                autoClear: true
+                autoClear: true,
               })
             )
             dispatch(resetList())
@@ -123,8 +123,8 @@ export const fetchObjects = () => {
   }
 }

-export const sortObjects = sortBy => {
-  return function(dispatch, getState) {
+export const sortObjects = (sortBy) => {
+  return function (dispatch, getState) {
     const { objects } = getState()
     let sortOrder = SORT_ORDER_ASC
     // Reverse sort order if the list is already sorted on same field
@@ -149,18 +149,18 @@ const sortObjectsList = (list, sortBy, sortOrder) => {
   }
 }

-export const setSortBy = sortBy => ({
+export const setSortBy = (sortBy) => ({
   type: SET_SORT_BY,
-  sortBy
+  sortBy,
 })

-export const setSortOrder = sortOrder => ({
+export const setSortOrder = (sortOrder) => ({
   type: SET_SORT_ORDER,
-  sortOrder
+  sortOrder,
 })

-export const selectPrefix = prefix => {
-  return function(dispatch, getState) {
+export const selectPrefix = (prefix) => {
+  return function (dispatch, getState) {
     dispatch(setCurrentPrefix(prefix))
     dispatch(fetchObjects())
     dispatch(resetCheckedList())
@@ -169,49 +169,49 @@ export const selectPrefix = prefix => {
   }
 }

-export const setCurrentPrefix = prefix => {
+export const setCurrentPrefix = (prefix) => {
   return {
     type: SET_CURRENT_PREFIX,
-    prefix
+    prefix,
   }
 }

-export const setPrefixWritable = prefixWritable => ({
+export const setPrefixWritable = (prefixWritable) => ({
   type: SET_PREFIX_WRITABLE,
-  prefixWritable
+  prefixWritable,
 })

-export const deleteObject = object => {
-  return function(dispatch, getState) {
+export const deleteObject = (object) => {
+  return function (dispatch, getState) {
     const currentBucket = getCurrentBucket(getState())
     const currentPrefix = getCurrentPrefix(getState())
     const objectName = `${currentPrefix}${object}`
     return web
       .RemoveObject({
         bucketName: currentBucket,
-        objects: [objectName]
+        objects: [objectName],
       })
       .then(() => {
         dispatch(removeObject(object))
       })
-      .catch(e => {
+      .catch((e) => {
         dispatch(
           alertActions.set({
             type: "danger",
-            message: e.message
+            message: e.message,
           })
         )
       })
   }
 }

-export const removeObject = object => ({
+export const removeObject = (object) => ({
   type: REMOVE,
-  object
+  object,
 })

 export const deleteCheckedObjects = () => {
-  return function(dispatch, getState) {
+  return function (dispatch, getState) {
     const checkedObjects = getCheckedList(getState())
     for (let i = 0; i < checkedObjects.length; i++) {
       dispatch(deleteObject(checkedObjects[i]))
@@ -221,33 +221,52 @@ export const deleteCheckedObjects = () => {
   }

 export const shareObject = (object, days, hours, minutes) => {
-  return function(dispatch, getState) {
+  return function (dispatch, getState) {
+    const hasServerDomain = hasServerPublicDomain(getState())
     const currentBucket = getCurrentBucket(getState())
     const currentPrefix = getCurrentPrefix(getState())
     const objectName = `${currentPrefix}${object}`
     const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
     if (web.LoggedIn()) {
       return web
-        .PresignedGet({
-          host: location.host,
-          bucket: currentBucket,
-          object: objectName,
-          expiry: expiry
+        .GetBucketPolicy({ bucketName: currentBucket, prefix: currentPrefix })
+        .catch(() => ({ policy: null }))
+        .then(({ policy }) => {
+          if (hasServerDomain && ['readonly', 'readwrite'].includes(policy)) {
+            const domain = getServerInfo(getState()).info.domains[0]
+            const url = `${domain}/${currentBucket}/${encodeURI(objectName)}`
+            dispatch(showShareObject(object, url, false))
+            dispatch(
+              alertActions.set({
+                type: "success",
+                message: "Object shared."
+              })
+            )
+          } else {
+            return web
+              .PresignedGet({
+                host: location.host,
+                bucket: currentBucket,
+                object: objectName,
+                expiry: expiry
+              })
+          }
         })
-        .then(obj => {
+        .then((obj) => {
+          if (!obj) return
           dispatch(showShareObject(object, obj.url))
           dispatch(
             alertActions.set({
               type: "success",
-              message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
+              message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`,
             })
           )
         })
-        .catch(err => {
+        .catch((err) => {
           dispatch(
             alertActions.set({
               type: "danger",
-              message: err.message
+              message: err.message,
             })
           )
         })
@@ -265,29 +284,29 @@ export const shareObject = (object, days, hours, minutes) => {
       dispatch(
         alertActions.set({
           type: "success",
-          message: `Object shared.`
+          message: `Object shared.`,
         })
       )
     }
   }
 }

-export const showShareObject = (object, url) => ({
+export const showShareObject = (object, url, showExpiryDate = true) => ({
   type: SET_SHARE_OBJECT,
   show: true,
   object,
-  url
+  url,
+  showExpiryDate,
 })

 export const hideShareObject = (object, url) => ({
   type: SET_SHARE_OBJECT,
   show: false,
   object: "",
-  url: ""
+  url: "",
 })

-export const downloadObject = object => {
-  return function(dispatch, getState) {
+export const getObjectURL = (object, callback) => {
+  return function (dispatch, getState) {
     const currentBucket = getCurrentBucket(getState())
     const currentPrefix = getCurrentPrefix(getState())
     const objectName = `${currentPrefix}${object}`
@@ -295,52 +314,73 @@ export const downloadObject = object => {
     if (web.LoggedIn()) {
       return web
         .CreateURLToken()
-        .then(res => {
-          const url = `${
-            window.location.origin
-          }${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${
-            res.token
-          }`
-          window.location = url
+        .then((res) => {
+          const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
+          callback(url)
         })
-        .catch(err => {
+        .catch((err) => {
           dispatch(
             alertActions.set({
               type: "danger",
-              message: err.message
+              message: err.message,
             })
           )
         })
     } else {
-      const url = `${
-        window.location.origin
-      }${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
+      const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
+      callback(url)
     }
   }
 }
+export const downloadObject = (object) => {
+  return function (dispatch, getState) {
+    const currentBucket = getCurrentBucket(getState())
+    const currentPrefix = getCurrentPrefix(getState())
+    const objectName = `${currentPrefix}${object}`
+    const encObjectName = encodeURI(objectName)
+    if (web.LoggedIn()) {
+      return web
+        .CreateURLToken()
+        .then((res) => {
+          const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
+          window.location = url
+        })
+        .catch((err) => {
+          dispatch(
+            alertActions.set({
+              type: "danger",
+              message: err.message,
+            })
+          )
+        })
+    } else {
+      const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
+      window.location = url
+    }
+  }
+}

-export const checkObject = object => ({
+export const checkObject = (object) => ({
   type: CHECKED_LIST_ADD,
-  object
+  object,
 })

-export const uncheckObject = object => ({
+export const uncheckObject = (object) => ({
   type: CHECKED_LIST_REMOVE,
-  object
+  object,
 })

 export const resetCheckedList = () => ({
-  type: CHECKED_LIST_RESET
+  type: CHECKED_LIST_RESET,
 })

 export const downloadCheckedObjects = () => {
-  return function(dispatch, getState) {
+  return function (dispatch, getState) {
     const state = getState()
     const req = {
       bucketName: getCurrentBucket(state),
       prefix: getCurrentPrefix(state),
-      objects: getCheckedList(state)
+      objects: getCheckedList(state),
     }
     if (!web.LoggedIn()) {
       const requestUrl = location.origin + "/minio/zip?token="
@@ -348,17 +388,15 @@ export const downloadCheckedObjects = () => {
     } else {
       return web
         .CreateURLToken()
-        .then(res => {
-          const requestUrl = `${
-            location.origin
-          }${minioBrowserPrefix}/zip?token=${res.token}`
+        .then((res) => {
+          const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
           downloadZip(requestUrl, req, dispatch)
         })
-        .catch(err =>
+        .catch((err) =>
          dispatch(
            alertActions.set({
              type: "danger",
-              message: err.message
+              message: err.message,
            })
          )
        )
@@ -374,11 +412,11 @@ const downloadZip = (url, req, dispatch) => {
   xhr.open("POST", url, true)
   xhr.responseType = "blob"

-  xhr.onload = function(e) {
+  xhr.onload = function (e) {
     if (this.status == 200) {
       dispatch(resetCheckedList())
       var blob = new Blob([this.response], {
-        type: "octet/stream"
+        type: "octet/stream",
       })
       var blobUrl = window.URL.createObjectURL(blob)
       var separator = req.prefix.length > 1 ? "-" : ""

@@ -89,7 +89,8 @@ export default (
         shareObject: {
           show: action.show,
           object: action.object,
-          url: action.url
+          url: action.url,
+          showExpiryDate: action.showExpiryDate
         }
       }
     case actionsObjects.CHECKED_LIST_ADD:

1 browser/staticcheck.conf Normal file
@@ -0,0 +1 @@
checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]

File diff suppressed because one or more lines are too long
@@ -1,67 +0,0 @@
#!/bin/bash

# usage: ./benchcmp.sh <commit-sha1> <commit-sha2>
# Exit on any non zero return value on execution of a command.
set -e

# path of benchcmp.
benchcmp=${GOPATH}/bin/benchcmp

# function which runs the benchmark comparison.
RunBenchCmp () {
    # Path for storing output of benchmark at commit 1.
    commit1Bench=/tmp/minio-$1.bench
    # Path for storing output of benchmark at commit 2.
    commit2Bench=/tmp/minio-$2.bench
    # switch to commit $1.
    git checkout $1
    # Check if the benchmark results for given commit 1 already exists.
    # Benchmarks are time/resource consuming operations, run only if the the results doesn't exist.
    if [[ ! -f $commit1Bench ]]
    then
        echo "Running benchmarks at $1"
        go test -run=NONE -bench=. | tee $commit1Bench
    fi
    # get back to the commit from which it was started.
    git checkout -
    echo "Checking into commit $2"
    # switch to commit $2
    git checkout $2
    # Check if the benchmark results for given commit 2 already exists.
    # Benchmarks are time/resource consuming operations, run only if the the results doesn't exist.
    if [[ ! -f $commit2Bench ]]
    then
        # Running benchmarks at $2.
        echo "Running benchmarks at $2"
        go test -run=NONE -bench=. | tee $commit2Bench
    fi

    # get back to the commit from which it was started.
    git checkout -
    # Comparing the benchmarks.
    echo "Running benchmark comparison between $1 and $2 ..."
    $benchcmp $commit1Bench $commit2Bench
    echo "Done."
}

# check if 2 commit SHA's of snapshots of code for which benchmp has to be done is provided.
if [ ! $# -eq 2 ]
then
    # exit if commit SHA's are not provided.
    echo $#
    echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparison."
    exit 1
fi

# check if benchcmp exists.
if [[ -x "$benchcmp" ]]
then
    RunBenchCmp $1 $2
else
    # install benchcmp if doesnt't exist.
    echo "fetching Benchcmp..."
    go get -u golang.org/x/tools/cmd/benchcmp
    echo "Done."
    RunBenchCmp $1 $2
fi

@@ -1,14 +1,15 @@
 #!/bin/bash

 set -e
 # Enable tracing if set.
-[ -n "$BASH_XTRACEFD" ] && set -ex
+[ -n "$BASH_XTRACEFD" ] && set -x

 function _init() {
     ## All binaries are static make sure to disable CGO.
     export CGO_ENABLED=0

     ## List of architectures and OS to test coss compilation.
-    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
+    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
 }

 function _build() {
@@ -19,11 +20,11 @@ function _build() {
     package=$(go list -f '{{.ImportPath}}')
     printf -- "--> %15s:%s\n" "${osarch}" "${package}"

-    # Go build to build the binary.
+    # go build -trimpath to build the binary.
     export GOOS=$os
     export GOARCH=$arch
     export GO111MODULE=on
-    go build -tags kqueue -o /dev/null
+    go build -trimpath -tags kqueue -o /dev/null
 }

 function main() {

@@ -46,7 +46,10 @@ function main()
     gw_pid="$(start_minio_gateway_s3)"

     SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
-        SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh
+        SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
+        aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
+        healthcheck mc minio-dotnet minio-js \
+        minio-py s3cmd s3select security
     rv=$?

     kill "$sr_pid"

@@ -1,5 +0,0 @@
#!/usr/bin/env bash

set -e

GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...
7 buildscripts/race.sh Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env bash

set -e

for d in $(go list ./... | grep -v browser); do
    CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
done

@@ -154,34 +154,6 @@ function run_test_erasure_sets() {
     return "$rv"
 }

-function run_test_dist_erasure_sets_ipv6()
-{
-    minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
-
-    export SERVER_ENDPOINT="[::1]:9000"
-
-    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
-    rv=$?
-
-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
-    sleep 3
-
-    if [ "$rv" -ne 0 ]; then
-        for i in $(seq 0 9); do
-            echo "server$i log:"
-            cat "$WORK_DIR/dist-minio-v6-900$i.log"
-        done
-    fi
-
-    for i in $(seq 0 9); do
-        rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
-    done
-
-    return "$rv"
-}
-
 function run_test_zone_erasure_sets()
 {
     minio_pids=( $(start_minio_zone_erasure_sets) )

126 buildscripts/verify-healing.sh Executable file
@@ -0,0 +1,126 @@
#!/bin/bash
#
# MinIO Cloud Storage, (C) 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e
set -E
set -o pipefail

if [ ! -x "$PWD/minio" ]; then
    echo "minio executable binary not found in current directory"
    exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function start_minio_3_node() {
    declare -a minio_pids
    declare -a ARGS
    export MINIO_ACCESS_KEY=minio
    export MINIO_SECRET_KEY=minio123

    start_port=$(shuf -i 10000-65000 -n 1)
    for i in $(seq 1 3); do
        ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
    done

    "${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
    minio_pids[0]=$!
    disown "${minio_pids[0]}"

    "${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
    minio_pids[1]=$!
    disown "${minio_pids[1]}"

    "${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
    minio_pids[2]=$!
    disown "${minio_pids[2]}"

    sleep "$1"
    for pid in "${minio_pids[@]}"; do
        if ! kill "$pid"; then
            for i in $(seq 1 3); do
                echo "server$i log:"
                cat "${WORK_DIR}/dist-minio-server$i.log"
            done
            echo "FAILED"
            purge "$WORK_DIR"
            exit 1
        fi
        # forcibly killing, to proceed further properly.
        kill -9 "$pid"
        sleep 1 # wait 1sec per pid
    done
}


function check_online() {
    if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-*.log; then
        echo "1"
    fi
}

function purge()
{
    rm -rf "$1"
}

function __init__()
{
    echo "Initializing environment"
    mkdir -p "$WORK_DIR"
    mkdir -p "$MINIO_CONFIG_DIR"

    ## version is purposefully set to '3' for minio to migrate configuration file
    echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' > "$MINIO_CONFIG_DIR/config.json"
}

function perform_test() {
    start_minio_3_node 60

    echo "Testing Distributed Erasure setup healing of drives"
    echo "Remove the contents of the disks belonging to '${1}' erasure set"

    rm -rf ${WORK_DIR}/${1}/*/

    start_minio_3_node 60

    rv=$(check_online)
    if [ "$rv" == "1" ]; then
        pkill -9 minio
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi
}

function main()
{
    perform_test "2"
    perform_test "1"
    perform_test "3"
}

( __init__ "$@" && main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"

@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,11 +18,14 @@ package cmd

 import (
     "encoding/xml"
+    "io"
     "net/http"
+    "net/url"

     "github.com/gorilla/mux"
+    xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
-    "github.com/minio/minio/pkg/policy"
+    "github.com/minio/minio/pkg/bucket/policy"
 )

 // Data types used for returning dummy access control
@@ -50,6 +53,71 @@ type accessControlPolicy struct {
     } `xml:"AccessControlList"`
 }

+// PutBucketACLHandler - PUT Bucket ACL
+// -----------------
+// This operation uses the ACL subresource
+// to set ACL for a bucket, this is a dummy call
+// only responds success if the ACL is private.
+func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "PutBucketACL")
+
+    defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
+
+    vars := mux.Vars(r)
+    bucket := vars["bucket"]
+
+    objAPI := api.ObjectAPI()
+    if objAPI == nil {
+        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    // Allow putBucketACL if policy action is set, since this is a dummy call
+    // we are simply re-purposing the bucketPolicyAction.
+    if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
+        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    // Before proceeding validate if bucket exists.
+    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    if err != nil {
+        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    aclHeader := r.Header.Get(xhttp.AmzACL)
+    if aclHeader == "" {
+        acl := &accessControlPolicy{}
+        if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
+            if err == io.EOF {
+                writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingSecurityHeader),
+                    r.URL, guessIsBrowserReq(r))
+                return
+            }
+            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+            return
+        }
+
+        if len(acl.AccessControlList.Grants) == 0 {
+            writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+            return
+        }
+
+        if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
+            writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+            return
+        }
+    }
+
+    if aclHeader != "" && aclHeader != "private" {
+        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    w.(http.Flusher).Flush()
+}
+
 // GetBucketACLHandler - GET Bucket ACL
 // -----------------
 // This operation uses the ACL
@@ -99,6 +167,71 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
     w.(http.Flusher).Flush()
 }

+// PutObjectACLHandler - PUT Object ACL
+// -----------------
+// This operation uses the ACL subresource
+// to set ACL for a bucket, this is a dummy call
+// only responds success if the ACL is private.
+func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "PutObjectACL")
+
+    defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
+
+    vars := mux.Vars(r)
+    bucket := vars["bucket"]
+    object, err := url.PathUnescape(vars["object"])
+    if err != nil {
+        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    objAPI := api.ObjectAPI()
+    if objAPI == nil {
+        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    // Allow putObjectACL if policy action is set, since this is a dummy call
+    // we are simply re-purposing the bucketPolicyAction.
+    if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
+        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    // Before proceeding validate if object exists.
+    _, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
+    if err != nil {
+        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    aclHeader := r.Header.Get(xhttp.AmzACL)
+    if aclHeader == "" {
+        acl := &accessControlPolicy{}
+        if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
+            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+            return
+        }
+
+        if len(acl.AccessControlList.Grants) == 0 {
+            writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+            return
+        }
+
+        if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
+            writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+            return
+        }
+    }
+
+    if aclHeader != "" && aclHeader != "private" {
+        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
+        return
+    }
+
+    w.(http.Flusher).Flush()
+}
+
 // GetObjectACLHandler - GET Object ACL
 // -----------------
 // This operation uses the ACL
@@ -110,7 +243,11 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.

     vars := mux.Vars(r)
     bucket := vars["bucket"]
-    object := vars["object"]
+    object, err := url.PathUnescape(vars["object"])
+    if err != nil {
+        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+        return
+    }

     objAPI := api.ObjectAPI()
     if objAPI == nil {
@@ -126,7 +263,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
     }

     // Before proceeding validate if object exists.
-    _, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
+    _, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
         return

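The two PUT ACL handlers above are deliberately dummy implementations: they accept only the "private" canned ACL (via the x-amz-acl header or an XML body with a single FULL_CONTROL grant) and reject everything else. A minimal client-side sketch of that contract follows; the endpoint and bucket name are placeholders, and the request is left unsigned for brevity even though a real one must carry AWS Signature V4 auth to pass checkRequestAuthType:

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    func main() {
    	// Hypothetical endpoint and bucket; a real request must be signed.
    	req, err := http.NewRequest(http.MethodPut,
    		"http://localhost:9000/mybucket/?acl", strings.NewReader(""))
    	if err != nil {
    		panic(err)
    	}
    	// The dummy handler accepts only the "private" canned ACL;
    	// any other value returns an invalid-argument error.
    	req.Header.Set("x-amz-acl", "private")

    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	fmt.Println("status:", resp.Status)
    }
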
@@ -35,50 +35,47 @@ import (
     "github.com/minio/minio/cmd/config/storageclass"
     "github.com/minio/minio/cmd/crypto"
     "github.com/minio/minio/cmd/logger"
+    "github.com/minio/minio/pkg/auth"
     iampolicy "github.com/minio/minio/pkg/iam/policy"
     "github.com/minio/minio/pkg/madmin"
 )

-func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
+func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
     // Get current object layer instance.
     objectAPI := newObjectLayerWithoutSafeModeFn()
     if objectAPI == nil {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
-        return nil
+        return auth.Credentials{}, nil
     }

     // Validate request signature.
-    _, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
+    cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
     if adminAPIErr != ErrNone {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-        return nil
+        return cred, nil
     }

-    return objectAPI
+    return cred, objectAPI
 }

-// DelConfigKVHandler - DELETE /minio/admin/v2/del-config-kv
+// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
 func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "DelConfigKVHandler")
+    ctx := newContext(r, w, "DeleteConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
         // More than maxConfigSize bytes were available
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
     if err != nil {
         logger.LogIf(ctx, err, logger.Application)
@@ -103,28 +100,24 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
     }
 }

-// SetConfigKVHandler - PUT /minio/admin/v2/set-config-kv
+// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
 func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "SetConfigKVHandler")
+    ctx := newContext(r, w, "SetConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
         // More than maxConfigSize bytes were available
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
     if err != nil {
         logger.LogIf(ctx, err, logger.Application)
@@ -162,17 +155,19 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ

     // Make sure to write backend is encrypted
     if globalConfigEncrypted {
-        saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
+        saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
     }

     writeSuccessResponseHeadersOnly(w)
 }

-// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}
+// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
 func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "GetConfigKVHandler")
+    ctx := newContext(r, w, "GetConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -195,7 +190,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     econfigData, err := madmin.EncryptData(password, buf.Bytes())
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -206,9 +201,11 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 }

 func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "ClearConfigHistoryKVHandler")
+    ctx := newContext(r, w, "ClearConfigHistoryKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
+
+    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -241,9 +238,11 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *

 // RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
 func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")
+    ctx := newContext(r, w, "RestoreConfigHistoryKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
+
+    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -287,9 +286,11 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r

 // ListConfigHistoryKVHandler - lists all the KV ids.
 func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "ListConfigHistoryKVHandler")
+    ctx := newContext(r, w, "ListConfigHistoryKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -313,7 +314,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     econfigData, err := madmin.EncryptData(password, data)
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -323,11 +324,13 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
     writeSuccessResponseJSON(w, econfigData)
 }

-// HelpConfigKVHandler - GET /minio/admin/v2/help-config-kv?subSys={subSys}&key={key}
+// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
 func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "HelpConfigKVHandler")
+    ctx := newContext(r, w, "HelpConfigKV")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
+
+    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -349,28 +352,24 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
     w.(http.Flusher).Flush()
 }

-// SetConfigHandler - PUT /minio/admin/v2/config
+// SetConfigHandler - PUT /minio/admin/v3/config
 func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "SetConfigHandler")
+    ctx := newContext(r, w, "SetConfig")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
         // More than maxConfigSize bytes were available
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
         return
     }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
     if err != nil {
         logger.LogIf(ctx, err, logger.Application)
@@ -403,18 +402,20 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques

     // Make sure to write backend is encrypted
     if globalConfigEncrypted {
-        saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
+        saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
     }

     writeSuccessResponseHeadersOnly(w)
 }

-// GetConfigHandler - GET /minio/admin/v2/config
+// GetConfigHandler - GET /minio/admin/v3/config
 // Get config.json of this minio setup.
 func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "GetConfigHandler")
+    ctx := newContext(r, w, "GetConfig")

-    objectAPI := validateAdminReqConfigKV(ctx, w, r)
+    defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
+
+    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
     if objectAPI == nil {
         return
     }
@@ -471,7 +472,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
     }
 }

-    password := globalActiveCred.SecretKey
+    password := cred.SecretKey
     econfigData, err := madmin.EncryptData(password, []byte(s.String()))
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

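The recurring change in these handlers is that admin config payloads are now encrypted and decrypted with the requesting credential's secret key (cred.SecretKey) rather than the global root credential, so any authorized admin user can round-trip config data with its own key. A small sketch of that symmetric pairing, using the same madmin helpers the handlers call above (the secret key value is a placeholder):

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/minio/minio/pkg/madmin"
    )

    func main() {
    	// Placeholder credential; in practice this is the secret key of
    	// whichever user signed the admin API request.
    	secretKey := "minio123"

    	plaintext := []byte(`region name=us-east-1`)

    	// Server side: GetConfigKVHandler encrypts responses with this helper.
    	ciphertext, err := madmin.EncryptData(secretKey, plaintext)
    	if err != nil {
    		panic(err)
    	}

    	// Client side: decrypt with the same key, mirroring how the
    	// handlers decrypt request bodies via madmin.DecryptData.
    	decrypted, err := madmin.DecryptData(secretKey, bytes.NewReader(ciphertext))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(bytes.Equal(plaintext, decrypted)) // true
    }
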
159 cmd/admin-handlers-quota.go Normal file
@@ -0,0 +1,159 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "io/ioutil"
    "net/http"
    "path"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/config"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/env"
    iampolicy "github.com/minio/minio/pkg/iam/policy"
)

const (
    bucketQuotaConfigFile = "quota.json"
)

// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketQuotaConfig")

    defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    // Turn off quota commands if data usage info is unavailable.
    if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
        return
    }

    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    defer r.Body.Close()
    data, err := ioutil.ReadAll(r.Body)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
        return
    }
    quotaCfg, err := parseBucketQuota(data)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
    if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    if quotaCfg.Quota > 0 {
        globalBucketQuotaSys.Set(bucket, quotaCfg)
        globalNotificationSys.PutBucketQuotaConfig(ctx, bucket, quotaCfg)

    } else {
        globalBucketQuotaSys.Remove(bucket)
        globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)

    }

    // Write success response.
    writeSuccessResponseHeadersOnly(w)
}

// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketQuotaConfig")

    defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
    configData, err := readConfig(ctx, objectAPI, configFile)
    if err != nil {
        if err != errConfigNotFound {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, configData)
}

// RemoveBucketQuotaConfigHandler - removes Bucket quota configuration.
// ----------
// Removes quota configuration on the specified bucket.
func (a adminAPIHandlers) RemoveBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RemoveBucketQuotaConfig")

    defer logger.AuditLog(w, r, "RemoveBucketQuotaConfig", mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
    if err := deleteConfig(ctx, objectAPI, configFile); err != nil {
        if err != errConfigNotFound {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
        return
    }
    globalBucketQuotaSys.Remove(bucket)
    globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
    // Write success response.
    writeSuccessNoContent(w)
}

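For reference, the quota configuration round-trips as JSON through the per-bucket quota.json object, and the handler installs the quota when it is positive and removes it otherwise. The sketch below mirrors that decision with an illustrative local struct; the exact field names of the real madmin quota type (parsed by parseBucketQuota) are an assumption, not taken from this diff:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Illustrative mirror of the config stored at buckets/<bucket>/quota.json;
    // the actual struct in madmin may differ.
    type bucketQuota struct {
    	Quota uint64 `json:"quota"` // limit in bytes; 0 removes the quota
    	Type  string `json:"quotatype,omitempty"`
    }

    func main() {
    	data := []byte(`{"quota": 1073741824, "quotatype": "fifo"}`)

    	var q bucketQuota
    	if err := json.Unmarshal(data, &q); err != nil {
    		panic(err)
    	}

    	// Mirrors PutBucketQuotaConfigHandler: a positive quota installs
    	// the config, zero removes it.
    	if q.Quota > 0 {
    		fmt.Printf("enforce %d-byte %s quota\n", q.Quota, q.Type)
    	} else {
    		fmt.Println("remove quota config")
    	}
    }
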
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -51,21 +51,17 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
     return objectAPI, cred
 }

-// RemoveUser - DELETE /minio/admin/v2/remove-user?accessKey=<access_key>
+// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
 func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "RemoveUser")

+    defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     vars := mux.Vars(r)
     accessKey := vars["accessKey"]
@@ -93,15 +89,19 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
     }
 }

-// ListUsers - GET /minio/admin/v2/list-users
+// ListUsers - GET /minio/admin/v3/list-users
 func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "ListUsers")

-    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
+    defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
+
+    objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
     if objectAPI == nil {
         return
     }

+    password := cred.SecretKey
+
     allCredentials, err := globalIAMSys.ListUsers()
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -114,7 +114,6 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
         return
     }

-    password := globalActiveCred.SecretKey
     econfigData, err := madmin.EncryptData(password, data)
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -124,10 +123,12 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, econfigData)
 }

-// GetUserInfo - GET /minio/admin/v2/user-info
+// GetUserInfo - GET /minio/admin/v3/user-info
 func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "GetUserInfo")

+    defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
     if objectAPI == nil {
         return
@@ -151,10 +152,12 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, data)
 }

-// UpdateGroupMembers - PUT /minio/admin/v2/update-group-members
+// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
 func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "UpdateGroupMembers")

+    defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
     if objectAPI == nil {
         return
@@ -194,10 +197,12 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
     }
 }

-// GetGroup - /minio/admin/v2/group?group=mygroup1
+// GetGroup - /minio/admin/v3/group?group=mygroup1
 func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "GetGroup")

+    defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
     if objectAPI == nil {
         return
@@ -221,10 +226,12 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, body)
 }

-// ListGroups - GET /minio/admin/v2/groups
+// ListGroups - GET /minio/admin/v3/groups
 func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "ListGroups")

+    defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
     if objectAPI == nil {
         return
@@ -245,10 +252,12 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
     writeSuccessResponseJSON(w, body)
 }

-// SetGroupStatus - PUT /minio/admin/v2/set-group-status?group=mygroup1&status=enabled
+// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
 func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "SetGroupStatus")

+    defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
     if objectAPI == nil {
         return
@@ -280,21 +289,17 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
     }
 }

-// SetUserStatus - PUT /minio/admin/v2/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
+// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
 func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "SetUserStatus")

+    defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
+
     objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     vars := mux.Vars(r)
     accessKey := vars["accessKey"]
     status := vars["status"]
@@ -319,21 +324,17 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
     }
 }

-// AddUser - PUT /minio/admin/v2/add-user?accessKey=<access_key>
+// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
 func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
     ctx := newContext(r, w, "AddUser")

+    defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
+
     objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
     if objectAPI == nil {
         return
     }

-    // Deny if WORM is enabled
-    if globalWORMEnabled {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-        return
-    }
-
     vars := mux.Vars(r)
     accessKey := vars["accessKey"]
@@ -378,16 +379,322 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
     }
 }

-// InfoCannedPolicy - GET /minio/admin/v2/info-canned-policy?name={policyName}
-func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "InfoCannedPolicy")
+// AddServiceAccount - PUT /minio/admin/v3/add-service-account
+func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, w, "AddServiceAccount")
+
+    defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
+
+    // Get current object layer instance.
+    objectAPI := newObjectLayerWithoutSafeModeFn()
+    if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+        return
+    }
+
+    cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+    if s3Err != ErrNone {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+        return
+    }
+
+    password := cred.SecretKey
+    reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
+        return
+    }
+
+    var createReq madmin.AddServiceAccountReq
+    if err = json.Unmarshal(reqBytes, &createReq); err != nil {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
+        return
+    }
+
+    // Disallow creating service accounts by root user.
+    if owner {
+        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+        return
+    }
+
+    parentUser := cred.AccessKey
+    if cred.ParentUser != "" {
+        parentUser = cred.ParentUser
+    }
+
+    newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    // Notify all other Minio peers to reload user the service account
+    for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
+        if nerr.Err != nil {
+            logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
+            logger.LogIf(ctx, nerr.Err)
+        }
+    }
+
+    var createResp = madmin.AddServiceAccountResp{
+        Credentials: auth.Credentials{
+            AccessKey: newCred.AccessKey,
+            SecretKey: newCred.SecretKey,
+        },
+    }
+
+    data, err := json.Marshal(createResp)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+        return
+    }
+
+    encryptedData, err := madmin.EncryptData(password, data)
+    if err != nil {
+        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, encryptedData)
|
||||
}
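The new service-account endpoints never carry credentials in plaintext: the request body is decrypted with the requester's secret key via madmin.DecryptData, and the response is encrypted the same way with madmin.EncryptData. Below is a minimal client-side sketch of that round trip; the import path and the placeholder secret key are assumptions, not taken from this diff.

```go
// Sketch only: mirrors the EncryptData/DecryptData round trip used by
// AddServiceAccount above. Credentials are placeholders.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/minio/minio/pkg/madmin" // assumed vendored path at this point in history
)

func main() {
	secretKey := "minio123" // requester's secret key (placeholder)

	// Marshal the request the same way the handler expects it.
	body, err := json.Marshal(madmin.AddServiceAccountReq{})
	if err != nil {
		panic(err)
	}

	// Encrypt with the secret key before sending; the handler calls
	// madmin.DecryptData with the same key on the other side.
	encrypted, err := madmin.EncryptData(secretKey, body)
	if err != nil {
		panic(err)
	}

	// The handler performs the inverse on r.Body:
	decrypted, err := madmin.DecryptData(secretKey, bytes.NewReader(encrypted))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(body, decrypted)) // true
}
```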

+// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
+func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ListServiceAccounts")
+
+	defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
+
+	// Get current object layer instance.
+	objectAPI := newObjectLayerWithoutSafeModeFn()
+	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return
+	}
+
+	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+	if s3Err != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+		return
+	}
+
+	// Disallow listing service accounts for the root user.
+	if owner {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+		return
+	}
+
+	parentUser := cred.AccessKey
+	if cred.ParentUser != "" {
+		parentUser = cred.ParentUser
+	}
+
+	serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	var listResp = madmin.ListServiceAccountsResp{
+		Accounts: serviceAccounts,
+	}
+
+	data, err := json.Marshal(listResp)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	writeSuccessResponseJSON(w, encryptedData)
+}
+
+// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
+func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "DeleteServiceAccount")
+
+	defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
+
+	// Get current object layer instance.
+	objectAPI := newObjectLayerWithoutSafeModeFn()
+	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return
+	}
+
+	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+	if s3Err != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+		return
+	}
+
+	// Disallow deleting service accounts by the root user.
+	if owner {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+		return
+	}
+
+	serviceAccount := mux.Vars(r)["accessKey"]
+	if serviceAccount == "" {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
+		return
+	}
+
+	user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	parentUser := cred.AccessKey
+	if cred.ParentUser != "" {
+		parentUser = cred.ParentUser
+	}
+
+	if parentUser != user || user == "" {
+		// Either the service account belongs to another user or it
+		// does not exist; return "not found" in both cases to
+		// mitigate brute-force enumeration of access keys.
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
+		return
+	}
+
+	err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	writeSuccessNoContent(w)
+}
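The ownership check above deliberately collapses two distinct failure modes, "owned by someone else" and "does not exist", into one ErrServiceAccountNotFound response, so a caller cannot probe which access keys exist. The same pattern in isolation, using a hypothetical helper that is not part of the handler above:

```go
package main

import "fmt"

// isDeletable mirrors the ownership gate in DeleteServiceAccount: both
// "not yours" and "no such account" collapse to false, so the caller can
// only ever return a uniform not-found error.
func isDeletable(parentOf map[string]string, requester, account string) bool {
	parent, ok := parentOf[account]
	return ok && parent == requester
}

func main() {
	parentOf := map[string]string{"svc-key-1": "alice"} // placeholder data
	fmt.Println(isDeletable(parentOf, "alice", "svc-key-1")) // true
	fmt.Println(isDeletable(parentOf, "bob", "svc-key-1"))   // false: not the owner
	fmt.Println(isDeletable(parentOf, "bob", "svc-key-2"))   // false: no such account
}
```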

+// AccountUsageInfoHandler returns usage info for the requesting account.
+func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "AccountUsageInfo")
+
+	defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
+
+	// Get current object layer instance.
+	objectAPI := newObjectLayerWithoutSafeModeFn()
+	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return
+	}
+
+	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
+	if s3Err != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+		return
+	}
+
+	// Set prefix value for "s3:prefix" policy conditionals.
+	r.Header.Set("prefix", "")
+
+	// Set delimiter value for "s3:delimiter" policy conditionals.
+	r.Header.Set("delimiter", SlashSeparator)
+
+	isAllowedAccess := func(bucketName string) (rd, wr bool) {
+		if globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     cred.AccessKey,
+			Action:          iampolicy.ListBucketAction,
+			BucketName:      bucketName,
+			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+			IsOwner:         owner,
+			ObjectName:      "",
+			Claims:          claims,
+		}) {
+			rd = true
+		}
+
+		if globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     cred.AccessKey,
+			Action:          iampolicy.PutObjectAction,
+			BucketName:      bucketName,
+			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+			IsOwner:         owner,
+			ObjectName:      "",
+			Claims:          claims,
+		}) {
+			wr = true
+		}
+
+		return rd, wr
+	}
+
+	buckets, err := objectAPI.ListBuckets(ctx)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	// Load the latest calculated data usage
+	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
+	if err != nil {
+		logger.LogIf(ctx, err)
+	}
+
+	accountName := cred.AccessKey
+	if cred.ParentUser != "" {
+		accountName = cred.ParentUser
+	}
+
+	acctInfo := madmin.AccountUsageInfo{
+		AccountName: accountName,
+	}
+
+	for _, bucket := range buckets {
+		rd, wr := isAllowedAccess(bucket.Name)
+		if rd || wr {
+			var size uint64
+			// Fetch the data usage of the current bucket
+			if !dataUsageInfo.LastUpdate.IsZero() && len(dataUsageInfo.BucketsSizes) > 0 {
+				size = dataUsageInfo.BucketsSizes[bucket.Name]
+			}
+			acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
+				Name:    bucket.Name,
+				Created: bucket.Created,
+				Size:    size,
+				Access: madmin.AccountAccess{
+					Read:  rd,
+					Write: wr,
+				},
+			})
+		}
+	}
+
+	usageInfoJSON, err := json.Marshal(acctInfo)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	writeSuccessResponseJSON(w, usageInfoJSON)
+}
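The response body is the marshaled madmin.AccountUsageInfo. A sketch of the payload shape follows, using local mirrors of the structs since the exact JSON tags of the madmin types are not shown in this diff and are therefore assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local mirrors of the madmin types used by AccountUsageInfoHandler;
// field names come from the struct literals above, JSON tags are guesses.
type accountAccess struct {
	Read  bool `json:"read"`
	Write bool `json:"write"`
}

type bucketUsageInfo struct {
	Name    string        `json:"name"`
	Created time.Time     `json:"created"`
	Size    uint64        `json:"size"`
	Access  accountAccess `json:"access"`
}

type accountUsageInfo struct {
	AccountName string            `json:"accountName"`
	Buckets     []bucketUsageInfo `json:"buckets"`
}

func main() {
	info := accountUsageInfo{
		AccountName: "newuser", // placeholder account
		Buckets: []bucketUsageInfo{{
			Name:    "mybucket", // placeholder bucket
			Created: time.Now(),
			Size:    1 << 20,
			Access:  accountAccess{Read: true, Write: false},
		}},
	}
	out, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(out))
}
```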

+// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
+func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "InfoCannedPolicyV2")
+
+	defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
+
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
	if objectAPI == nil {
		return
	}

-	data, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
+	policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

+	data, err := json.Marshal(policy)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
@@ -397,9 +704,32 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
	w.(http.Flusher).Flush()
}

-// ListCannedPolicies - GET /minio/admin/v2/list-canned-policies
-func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, w, "ListCannedPolicies")
+// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
+func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "InfoCannedPolicy")
+
+	defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
+
+	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
+	if objectAPI == nil {
+		return
+	}
+
+	policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	json.NewEncoder(w).Encode(policy)
+	w.(http.Flusher).Flush()
+}
+
+// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
+func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ListCannedPoliciesV2")
+
+	defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
+
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
	if objectAPI == nil {
@@ -412,7 +742,16 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
		return
	}

-	if err = json.NewEncoder(w).Encode(policies); err != nil {
+	policyMap := make(map[string][]byte, len(policies))
+	for k, p := range policies {
+		var err error
+		policyMap[k], err = json.Marshal(p)
+		if err != nil {
+			logger.LogIf(ctx, err)
+			continue
+		}
+	}
+	if err = json.NewEncoder(w).Encode(policyMap); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -420,10 +759,46 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
	w.(http.Flusher).Flush()
}

-// RemoveCannedPolicy - DELETE /minio/admin/v2/remove-canned-policy?name=<policy_name>
+// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
+func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ListCannedPolicies")
+
+	defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
+
+	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
+	if objectAPI == nil {
+		return
+	}
+
+	policies, err := globalIAMSys.ListPolicies()
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	var newPolicies = make(map[string]iampolicy.Policy)
+	for name, p := range policies {
+		_, err = json.Marshal(p)
+		if err != nil {
+			logger.LogIf(ctx, err)
+			continue
+		}
+		newPolicies[name] = p
+	}
+	if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	w.(http.Flusher).Flush()
+}
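Both versions of the list handler re-marshal every stored policy before encoding the name-to-policy map, skipping any policy that fails to marshal. A small sketch of the parse/marshal round trip these handlers rely on; the iampolicy import path and the policy document are assumptions for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	iampolicy "github.com/minio/minio/pkg/iam/policy" // assumed vendored path
)

func main() {
	// A minimal canned policy in the format AddCannedPolicy accepts;
	// bucket name and statement are placeholders.
	const doc = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": ["s3:GetObject"],
    "Resource": ["arn:aws:s3:::mybucket/*"]
  }]
}`

	// Same parse step AddCannedPolicy performs on the request body.
	policy, err := iampolicy.ParseConfig(strings.NewReader(doc))
	if err != nil {
		panic(err)
	}

	// Same marshal step ListCannedPolicies performs per stored policy.
	out, err := json.Marshal(policy)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```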

+// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "RemoveCannedPolicy")

+	defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
+
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
	if objectAPI == nil {
		return
@@ -432,12 +807,6 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
	vars := mux.Vars(r)
	policyName := vars["name"]

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
	if err := globalIAMSys.DeletePolicy(policyName); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -452,10 +821,12 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
	}
}

-// AddCannedPolicy - PUT /minio/admin/v2/add-canned-policy?name=<policy_name>
+// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AddCannedPolicy")

+	defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
+
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
	if objectAPI == nil {
		return
@@ -464,12 +835,6 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
	vars := mux.Vars(r)
	policyName := vars["name"]

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
	// Error out if Content-Length is missing.
	if r.ContentLength <= 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
@@ -484,7 +849,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request

	iamPolicy, err := iampolicy.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@@ -508,10 +873,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
	}
}

-// SetPolicyForUserOrGroup - PUT /minio/admin/v2/set-policy?policy=xxx&user-or-group=?[&is-group]
+// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetPolicyForUserOrGroup")

+	defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
+
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
	if objectAPI == nil {
		return
@@ -522,15 +889,9 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
	entityName := vars["userOrGroup"]
	isGroup := vars["isGroup"] == "true"

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
	if !isGroup {
		ok, err := globalIAMSys.IsTempUser(entityName)
-		if err != nil {
+		if err != nil && err != errNoSuchUser {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
File diff suppressed because it is too large
@@ -43,12 +43,17 @@ type adminXLTestBed struct {

// prepareAdminXLTestBed - helper function that sets up a single-node
// XL backend for admin-handler tests.
-func prepareAdminXLTestBed() (*adminXLTestBed, error) {
+func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {

	// reset global variables to start afresh.
	resetTestGlobals()

+	// Set globalIsXL to indicate that the setup uses an erasure
+	// code backend.
+	globalIsXL = true
+
	// Initializing objectLayer for HealFormatHandler.
-	objLayer, xlDirs, xlErr := initTestXLObjLayer()
+	objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
	if xlErr != nil {
		return nil, xlErr
	}
@@ -63,21 +68,12 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {

	globalEndpoints = mustGetZoneEndpoints(xlDirs...)

-	// Set globalIsXL to indicate that the setup uses an erasure
-	// code backend.
-	globalIsXL = true
-
	// Init global heal state
	if globalIsXL {
		globalAllHealState = initHealState()
	}

	globalConfigSys = NewConfigSys()

	globalIAMSys = NewIAMSys()
-	globalIAMSys.Init(objLayer)
+	globalIAMSys.Init(ctx, objLayer)

-	buckets, err := objLayer.ListBuckets(context.Background())
+	buckets, err := objLayer.ListBuckets(ctx)
	if err != nil {
		return nil, err
	}
@@ -90,7 +86,7 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {

	// Setup admin mgmt REST API handlers.
	adminRouter := mux.NewRouter()
-	registerAdminRouter(adminRouter, true, true)
+	registerAdminRouter(adminRouter, true, true, false)

	return &adminXLTestBed{
		xlDirs: xlDirs,
@@ -108,20 +104,20 @@ func (atb *adminXLTestBed) TearDown() {

// initTestObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
-func initTestXLObjLayer() (ObjectLayer, []string, error) {
+func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
	xlDirs, err := getRandomDisks(16)
	if err != nil {
		return nil, nil, err
	}
	endpoints := mustGetNewEndpoints(xlDirs...)
-	format, err := waitForFormatXL(true, endpoints, 1, 16, "")
+	storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
	if err != nil {
		removeRoots(xlDirs)
		return nil, nil, err
	}

	globalPolicySys = NewPolicySys()
-	objLayer, err := newXLSets(endpoints, format, 1, 16)
+	objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
	if err != nil {
		return nil, nil, err
	}
@@ -196,7 +192,10 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
-	adminTestBed, err := prepareAdminXLTestBed()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	adminTestBed, err := prepareAdminXLTestBed(ctx)
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
	}
@@ -264,10 +263,14 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}

func TestAdminServerInfo(t *testing.T) {
-	adminTestBed, err := prepareAdminXLTestBed()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	adminTestBed, err := prepareAdminXLTestBed(ctx)
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
	}

	defer adminTestBed.TearDown()

	// Initialize admin peers to make admin RPC calls.
@@ -318,12 +321,12 @@ func TestToAdminAPIErrCode(t *testing.T) {
		// 3. Non-admin API specific error.
		{
			err:            errDiskNotFound,
-			expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
+			expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
		},
	}

	for i, test := range testCases {
-		actualErr := toAdminAPIErrCode(context.Background(), test.err)
+		actualErr := toAdminAPIErrCode(GlobalContext, test.err)
		if actualErr != test.expectedAPIErr {
			t.Errorf("Test %d: Expected %v but received %v",
				i+1, test.expectedAPIErr, actualErr)
@@ -60,9 +60,8 @@ const (
)

var (
-	errHealIdleTimeout      = fmt.Errorf("healing results were not consumed for too long")
-	errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
-	errHealStopSignalled    = fmt.Errorf("heal stop signaled")
+	errHealIdleTimeout   = fmt.Errorf("healing results were not consumed for too long")
+	errHealStopSignalled = fmt.Errorf("heal stop signaled")

	errFnHealFromAPIErr = func(ctx context.Context, err error) error {
		apiErr := toAPIError(ctx, err)
@@ -73,10 +72,6 @@ var (

// healSequenceStatus - accumulated status of the heal sequence
type healSequenceStatus struct {
-	// lock to update this structure as it is concurrently
-	// accessed
-	updateLock *sync.RWMutex

	// summary and detail for failures
	Summary       healStatusSummary `json:"Summary"`
	FailureDetail string            `json:"Detail,omitempty"`
@@ -106,19 +101,17 @@ func initHealState() *allHealState {
		healSeqMap: make(map[string]*healSequence),
	}

-	go healState.periodicHealSeqsClean()
+	go healState.periodicHealSeqsClean(GlobalContext)

	return healState
}

-func (ahs *allHealState) periodicHealSeqsClean() {
+func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
	// Launch clean-up routine to remove this heal sequence (after
	// it ends) from the global state after timeout has elapsed.
-	ticker := time.NewTicker(time.Minute * 5)
-	defer ticker.Stop()
	for {
		select {
-		case <-ticker.C:
+		case <-time.After(time.Minute * 5):
			now := UTCNow()
			ahs.Lock()
			for path, h := range ahs.healSeqMap {
@@ -127,7 +120,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
				}
			}
			ahs.Unlock()
-		case <-GlobalServiceDoneCh:
+		case <-ctx.Done():
			// server could be restarting - need
			// to exit immediately
			return
@@ -183,7 +176,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
	}

	b, err := json.Marshal(&hsp)
-	return b, toAdminAPIErr(context.Background(), err)
+	return b, toAdminAPIErr(GlobalContext, err)
}

// LaunchNewHealSequence - launches a background routine that performs
@@ -202,9 +195,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
	existsAndLive := false
	he, exists := ahs.getHealSequence(h.path)
	if exists {
-		if !he.hasEnded() || len(he.currentStatus.Items) > 0 {
-			existsAndLive = true
-		}
+		existsAndLive = !he.hasEnded()
	}

	if existsAndLive {
@@ -277,8 +268,8 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
	}

	// Take lock to access and update the heal-sequence
-	h.currentStatus.updateLock.Lock()
-	defer h.currentStatus.updateLock.Unlock()
+	h.mutex.Lock()
+	defer h.mutex.Unlock()

	numItems := len(h.currentStatus.Items)

@@ -289,23 +280,27 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
		lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
	}

-	// After sending status to client, and before relinquishing
-	// the updateLock, reset Item to nil and record the result
-	// index sent to the client.
-	defer func(i int64) {
-		h.lastSentResultIndex = i
-		h.currentStatus.Items = nil
-	}(lastResultIndex)
+	h.lastSentResultIndex = lastResultIndex

	jbytes, err := json.Marshal(h.currentStatus)
	if err != nil {
+		h.currentStatus.Items = nil
+
		logger.LogIf(h.ctx, err)
		return nil, ErrInternalError
	}

+	h.currentStatus.Items = nil
+
	return jbytes, ErrNone
}

+// healSource denotes single entity and heal option.
+type healSource struct {
+	path string          // entity path (format, buckets, objects) to heal
+	opts madmin.HealOpts // optional heal option overrides default setting
+}
+
// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
@@ -315,12 +310,13 @@ type healSequence struct {
	// path is just pathJoin(bucket, objPrefix)
	path string

-	// List of entities (format, buckets, objects) to heal
-	sourceCh chan string
+	// A channel of entities (format, buckets, objects) to heal
+	sourceCh chan healSource

-	// Report healing progress, false if this is a background
-	// healing since currently there is no entity which will
-	// receive realtime healing status
+	// A channel of entities with heal result
+	respCh chan healResult
+
+	// Report healing progress
	reportProgress bool

	// time at which heal sequence was started
@@ -345,33 +341,42 @@ type healSequence struct {
	// completed
	traverseAndHealDoneCh chan error

-	// channel to signal heal sequence to stop (e.g. from the
-	// heal-stop API)
-	stopSignalCh chan struct{}
+	// canceler to cancel heal sequence.
+	cancelCtx context.CancelFunc

	// the last result index sent to client
	lastSentResultIndex int64

-	// Number of total items scanned
-	scannedItemsCount int64
+	// Number of total items scanned against item type
+	scannedItemsMap map[madmin.HealItemType]int64
+
+	// Number of total items healed against item type
+	healedItemsMap map[madmin.HealItemType]int64
+
+	// Number of total items where healing failed against endpoint and drive state
+	healFailedItemsMap map[string]int64

	// The time of the last scan/heal activity
	lastHealActivity time.Time

	// Holds the request-info for logging
	ctx context.Context

+	// used to lock this structure as it is concurrently accessed
+	mutex sync.RWMutex
}

// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
-func newHealSequence(bucket, objPrefix, clientAddr string,
+func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
	numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {

	reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
	reqInfo.AppendTags("prefix", objPrefix)
-	ctx := logger.SetReqInfo(context.Background(), reqInfo)
+	ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))

	return &healSequence{
+		respCh:    make(chan healResult),
		bucket:    bucket,
		objPrefix: objPrefix,
		path:      pathJoin(bucket, objPrefix),
@@ -385,19 +390,92 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
			Summary:      healNotStartedStatus,
			HealSettings: hs,
			NumDisks:     numDisks,
-			updateLock:   &sync.RWMutex{},
		},
		traverseAndHealDoneCh: make(chan error),
-		stopSignalCh:          make(chan struct{}),
+		cancelCtx:             cancel,
		ctx:                   ctx,
+		scannedItemsMap:       make(map[madmin.HealItemType]int64),
+		healedItemsMap:        make(map[madmin.HealItemType]int64),
+		healFailedItemsMap:    make(map[string]int64),
	}
}

+// resetHealStatusCounters - reset the healSequence status counters between
+// each monthly background heal scanning activity.
+// This is used only in case of Background healing scenario, where
+// we use a single long running healSequence which reactively heals
+// objects passed to the SourceCh.
+func (h *healSequence) resetHealStatusCounters() {
+	h.mutex.Lock()
+	defer h.mutex.Unlock()
+
+	h.currentStatus.Items = []madmin.HealResultItem{}
+	h.lastSentResultIndex = 0
+	h.scannedItemsMap = make(map[madmin.HealItemType]int64)
+	h.healedItemsMap = make(map[madmin.HealItemType]int64)
+	h.healFailedItemsMap = make(map[string]int64)
+}
+
+// getScannedItemsCount - returns a count of all scanned items
+func (h *healSequence) getScannedItemsCount() int64 {
+	var count int64
+	h.mutex.RLock()
+	defer h.mutex.RUnlock()
+
+	for _, v := range h.scannedItemsMap {
+		count = count + v
+	}
+	return count
+}
+
+// getScannedItemsMap - returns map of all scanned items against type
+func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
+	h.mutex.RLock()
+	defer h.mutex.RUnlock()
+
+	// Make a copy before returning the value
+	retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
+	for k, v := range h.scannedItemsMap {
+		retMap[k] = v
+	}
+
+	return retMap
+}
+
+// getHealedItemsMap - returns the map of all healed items against type
+func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
+	h.mutex.RLock()
+	defer h.mutex.RUnlock()
+
+	// Make a copy before returning the value
+	retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
+	for k, v := range h.healedItemsMap {
+		retMap[k] = v
+	}
+
+	return retMap
+}
+
+// gethealFailedItemsMap - returns map of all items where heal failed against
+// drive endpoint and status
+func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
+	h.mutex.RLock()
+	defer h.mutex.RUnlock()
+
+	// Make a copy before returning the value
+	retMap := make(map[string]int64, len(h.healFailedItemsMap))
+	for k, v := range h.healFailedItemsMap {
+		retMap[k] = v
+	}
+
+	return retMap
+}
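The new counter accessors all follow the same copy-before-return discipline: take the read lock, copy the map, return the copy, so callers can range over the result without racing updates from the heal goroutine. The same pattern in isolation, as a standalone sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

// counterSet guards a counter map the way healSequence guards its
// scanned/healed/failed maps: writers take the write lock, and every
// read hands out a private copy of the map.
type counterSet struct {
	mu     sync.RWMutex
	counts map[string]int64
}

func (c *counterSet) inc(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.counts == nil {
		c.counts = make(map[string]int64)
	}
	c.counts[key]++
}

// snapshot copies before returning, so callers never alias the live map.
func (c *counterSet) snapshot() map[string]int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]int64, len(c.counts))
	for k, v := range c.counts {
		out[k] = v
	}
	return out
}

func main() {
	var c counterSet
	c.inc("object")
	c.inc("object")
	fmt.Println(c.snapshot()) // map[object:2]
}
```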

// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
	select {
-	case <-h.stopSignalCh:
+	case <-h.ctx.Done():
		return true
	default:
		return false
@@ -406,19 +484,15 @@ func (h *healSequence) isQuitting() bool {

// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
-	h.currentStatus.updateLock.RLock()
-	summary := h.currentStatus.Summary
-	h.currentStatus.updateLock.RUnlock()
-	return summary == healStoppedStatus || summary == healFinishedStatus
+	h.mutex.RLock()
+	ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
+	h.mutex.RUnlock()
+	return ended
}

// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
-	select {
-	case <-h.stopSignalCh:
-	default:
-		close(h.stopSignalCh)
-	}
+	h.cancelCtx()
}
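Replacing the close-once channel dance with a context.CancelFunc is what makes stop() trivially idempotent: a CancelFunc is safe to invoke any number of times, while close() on an already-closed channel panics and therefore needed the select guard (which itself is racy under concurrent callers). A sketch of the two styles, as an illustration rather than the exact MinIO types:

```go
package main

import "context"

// oldStyle needs a guard because closing a closed channel panics.
// Note: two goroutines can still both reach the default branch at
// once, so this guard is not actually safe under concurrency.
type oldStyle struct{ stopCh chan struct{} }

func (o *oldStyle) stop() {
	select {
	case <-o.stopCh: // already closed
	default:
		close(o.stopCh)
	}
}

// newStyle leans on context: CancelFunc may be called repeatedly
// from any number of goroutines.
type newStyle struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func (n *newStyle) stop() { n.cancel() }

func main() {
	n := &newStyle{}
	n.ctx, n.cancel = context.WithCancel(context.Background())
	n.stop()
	n.stop() // safe: cancelling twice is a no-op
	<-n.ctx.Done()
}
```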

// pushHealResultItem - pushes a heal result item for consumption in
@@ -445,29 +519,27 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {

	var itemsLen int
	for {
-		h.currentStatus.updateLock.Lock()
+		h.mutex.Lock()
		itemsLen = len(h.currentStatus.Items)
		if itemsLen == maxUnconsumedHealResultItems {
-			// unlock and wait to check again if we can push
-			h.currentStatus.updateLock.Unlock()
-
-			// wait for a second, or quit if an external
-			// stop signal is received or the
-			// unconsumedTimer fires.
			select {
			// Check after a second
			case <-time.After(time.Second):
+				h.mutex.Unlock()
				continue

-			case <-h.stopSignalCh:
+			case <-h.ctx.Done():
+				h.mutex.Unlock()
				// discard result and return.
-				return errHealPushStopNDiscard
+				return errHealStopSignalled

-			// Timeout if no results consumed for too
-			// long.
+			// Timeout if no results consumed for too long.
			case <-unconsumedTimer.C:
+				h.mutex.Unlock()
				return errHealIdleTimeout
			}
		}
		break
@@ -484,13 +556,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
	h.currentStatus.Items = append(h.currentStatus.Items, r)

-	// release lock
-	h.currentStatus.updateLock.Unlock()
-
-	// This is a "safe" point for the heal sequence to quit if
-	// signaled externally.
-	if h.isQuitting() {
-		return errHealStopSignalled
-	}
+	h.mutex.Unlock()

	return nil
}
@@ -504,10 +570,10 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
	// Set status as running
-	h.currentStatus.updateLock.Lock()
+	h.mutex.Lock()
	h.currentStatus.Summary = healRunningStatus
	h.currentStatus.StartTime = UTCNow()
-	h.currentStatus.updateLock.Unlock()
+	h.mutex.Unlock()

	if h.sourceCh == nil {
		go h.traverseAndHeal()
@@ -517,25 +583,27 @@ func (h *healSequence) healSequenceStart() {

	select {
	case err, ok := <-h.traverseAndHealDoneCh:
-		if !ok {
-			return
-		}
+		h.mutex.Lock()
		h.endTime = UTCNow()
-		h.currentStatus.updateLock.Lock()
-		defer h.currentStatus.updateLock.Unlock()
		// Heal traversal is complete.
+		if ok {
			if err == nil {
				// heal traversal succeeded.
				h.currentStatus.Summary = healFinishedStatus
			} else {
				// heal traversal had an error.
				h.currentStatus.Summary = healStoppedStatus
				h.currentStatus.FailureDetail = err.Error()
-		} else {
-			// heal traversal succeeded.
-			h.currentStatus.Summary = healFinishedStatus
			}
+		}
+		h.mutex.Unlock()
-	case <-h.stopSignalCh:
+	case <-h.ctx.Done():
+		h.mutex.Lock()
		h.endTime = UTCNow()
-		h.currentStatus.updateLock.Lock()
		h.currentStatus.Summary = healStoppedStatus
		h.currentStatus.FailureDetail = errHealStopSignalled.Error()
-		h.currentStatus.updateLock.Unlock()
+		h.mutex.Unlock()

		// drain traverse channel so the traversal
		// go-routine does not leak.
@@ -548,81 +616,108 @@ func (h *healSequence) healSequenceStart() {
	}
}

-func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
-	var respCh = make(chan healResult)
-	defer close(respCh)
+func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
	// Send heal request
-	globalBackgroundHealRoutine.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
-	// Wait for answer and push result to the client
-	res := <-respCh
-	if !h.reportProgress {
-		return nil
-	}
-	res.result.Type = healType
-	if res.err != nil {
-		// Object might have been deleted, by the time heal
-		// was attempted, we should ignore this object and return success.
-		if isErrObjectNotFound(res.err) {
-			return nil
-		}
-		// Only report object error
-		if healType != madmin.HealItemObject {
-			return res.err
-		}
-		res.result.Detail = res.err.Error()
-	}
-	return h.pushHealResultItem(res.result)
+	task := healTask{
+		path:       source.path,
+		opts:       h.settings,
+		responseCh: h.respCh,
+	}
+	if !source.opts.Equal(h.settings) {
+		task.opts = source.opts
+	}
+	globalBackgroundHealRoutine.queueHealTask(task)
+
+	select {
+	case res := <-h.respCh:
+		if !h.reportProgress {
+			// Object might have been deleted, by the time heal
+			// was attempted, we should ignore this object and
+			// return success.
+			if isErrObjectNotFound(res.err) {
+				return nil
+			}
+
+			h.mutex.Lock()
+			defer h.mutex.Unlock()
+
+			// Progress is not reported in case of background heal processing.
+			// Instead we increment relevant counter based on the heal result
+			// for prometheus reporting.
+			if res.err != nil {
+				for _, d := range res.result.After.Drives {
+					// For failed items we report the endpoint and drive state
+					// This will help users take corrective actions for drives
+					h.healFailedItemsMap[d.Endpoint+","+d.State]++
+				}
+			} else {
+				// Only object type reported for successful healing
+				h.healedItemsMap[res.result.Type]++
+			}
+
+			// Report caller of any failure
+			return res.err
+		}
+		res.result.Type = healType
+		if res.err != nil {
+			// Object might have been deleted, by the time heal
+			// was attempted, we should ignore this object and return success.
+			if isErrObjectNotFound(res.err) {
+				return nil
+			}
+			// Only report object error
+			if healType != madmin.HealItemObject {
+				return res.err
+			}
			res.result.Detail = res.err.Error()
+		}
+		return h.pushHealResultItem(res.result)
+	case <-h.ctx.Done():
+		return nil
+	}
}

func (h *healSequence) healItemsFromSourceCh() error {
-	h.lastHealActivity = UTCNow()
-
-	// Start healing the config prefix.
-	if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
+	bucketsOnly := true // heal buckets only, not objects.
+	if err := h.healItems(bucketsOnly); err != nil {
		logger.LogIf(h.ctx, err)
	}

-	// Start healing the bucket config prefix.
-	if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
-		logger.LogIf(h.ctx, err)
-	}
-
-	// Start healing the background ops prefix.
-	if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
-		logger.LogIf(h.ctx, err)
-	}
-
-	for path := range h.sourceCh {
-
-		var itemType madmin.HealItemType
-		switch {
-		case path == nopHeal:
-			continue
-		case path == SlashSeparator:
-			itemType = madmin.HealItemMetadata
-		case !strings.Contains(path, SlashSeparator):
-			itemType = madmin.HealItemBucket
-		default:
-			itemType = madmin.HealItemObject
-		}
-
-		if err := h.queueHealTask(path, itemType); err != nil {
-			logger.LogIf(h.ctx, err)
-		}
-
-		h.scannedItemsCount++
-		h.lastHealActivity = UTCNow()
+	for {
+		select {
+		case source, ok := <-h.sourceCh:
+			if !ok {
+				return nil
+			}
+			var itemType madmin.HealItemType
+			switch {
+			case source.path == nopHeal:
+				continue
+			case source.path == SlashSeparator:
+				itemType = madmin.HealItemMetadata
+			case !strings.Contains(source.path, SlashSeparator):
+				itemType = madmin.HealItemBucket
+			default:
+				itemType = madmin.HealItemObject
+			}
+
+			if err := h.queueHealTask(source, itemType); err != nil {
+				logger.LogIf(h.ctx, err)
+			}
+
+			h.scannedItemsMap[itemType]++
+			h.lastHealActivity = UTCNow()
+		case <-h.ctx.Done():
+			return nil
+		}
	}
-
-	return nil
}

func (h *healSequence) healFromSourceCh() {
	h.healItemsFromSourceCh()
	close(h.traverseAndHealDoneCh)
}
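After this change every heal request flows through one long-lived respCh owned by the sequence, and every blocking receive is paired with ctx.Done(), so a cancelled sequence can never strand the worker or the requester. A reduced sketch of that request/response shape, with hypothetical task and result types standing in for healTask and healResult:

```go
package main

import (
	"context"
	"fmt"
)

type task struct {
	path   string
	respCh chan<- string // response channel owned by the sequence, carried by the task
}

// worker mimics globalBackgroundHealRoutine: it answers each task on
// the channel the task carries, and exits on cancellation.
func worker(ctx context.Context, tasks <-chan task) {
	for {
		select {
		case t := <-tasks:
			t.respCh <- "healed " + t.path
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tasks := make(chan task)
	respCh := make(chan string) // one response channel per heal sequence
	go worker(ctx, tasks)

	// Queue one task, then wait for either the answer or cancellation,
	// the same shape queueHealTask uses with h.respCh and h.ctx.Done().
	tasks <- task{path: "mybucket/obj", respCh: respCh}
	select {
	case res := <-respCh:
		fmt.Println(res)
	case <-ctx.Done():
	}
}
```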

-func (h *healSequence) healItems() error {
+func (h *healSequence) healItems(bucketsOnly bool) error {
	// Start with format healing
	if err := h.healDiskFormat(); err != nil {
		return err
@@ -638,13 +733,8 @@ func (h *healSequence) healItems() error {
		return err
	}

-	// Start healing the background ops prefix.
-	if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
-		logger.LogIf(h.ctx, err)
-	}
-
	// Heal buckets and objects
-	return h.healBuckets()
+	return h.healBuckets(bucketsOnly)
}

// traverseAndHeal - traverses on-disk data and performs healing
@@ -655,13 +745,8 @@ func (h *healSequence) healItems() error {
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
-	if err := h.healItems(); err != nil {
-		if h.isQuitting() {
-			err = errHealStopSignalled
-		}
-		h.traverseAndHealDoneCh <- err
-	}
+	bucketsOnly := false // Heals buckets and objects also.
+	h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
	close(h.traverseAndHealDoneCh)
}

@@ -678,12 +763,12 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
		// NOTE: Healing on meta is run regardless
		// of any bucket being selected, this is to ensure that
		// meta are always up to date and correct.
-		return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, func(bucket string, object string) error {
+		return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
			if h.isQuitting() {
				return errHealStopSignalled
			}

-			herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
+			herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
			// Object might have been deleted, by the time heal
			// was attempted we ignore this object and move on.
			if isErrObjectNotFound(herr) {
@@ -707,18 +792,18 @@ func (h *healSequence) healDiskFormat() error {
		return errServerNotInitialized
	}

-	return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
+	return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
-func (h *healSequence) healBuckets() error {
+func (h *healSequence) healBuckets(bucketsOnly bool) error {
	if h.isQuitting() {
		return errHealStopSignalled
	}

	// 1. If a bucket was specified, heal only the bucket.
	if h.bucket != "" {
-		return h.healBucket(h.bucket)
+		return h.healBucket(h.bucket, bucketsOnly)
	}

	// Get current object layer instance.
@@ -733,7 +818,7 @@ func (h *healSequence) healBuckets() error {
	}

	for _, bucket := range buckets {
-		if err = h.healBucket(bucket.Name); err != nil {
+		if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
			return err
		}
	}
@@ -742,17 +827,21 @@ func (h *healSequence) healBuckets() error {
}

// healBucket - traverses and heals given bucket
-func (h *healSequence) healBucket(bucket string) error {
+func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil {
		return errServerNotInitialized
	}

-	if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
+	if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
		return err
	}

+	if bucketsOnly {
+		return nil
+	}
+
	if !h.settings.Recursive {
		if h.objPrefix != "" {
			// Check if an object named as the objPrefix exists,
@@ -768,7 +857,7 @@ func (h *healSequence) healBucket(bucket string) error {
		return nil
	}

-	if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
+	if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
		return errFnHealFromAPIErr(h.ctx, err)
	}
	return nil
@@ -776,15 +865,15 @@ func (h *healSequence) healBucket(bucket string) error {

// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object string) error {
-	if h.isQuitting() {
-		return errHealStopSignalled
-	}
-
	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil {
		return errServerNotInitialized
	}

+	if h.isQuitting() {
+		return errHealStopSignalled
+	}
+
	return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
}

@@ -24,16 +24,18 @@ import (
)

const (
-	adminPathPrefix       = minioReservedBucketPath + "/admin"
-	adminAPIVersion       = madmin.AdminAPIVersion
-	adminAPIVersionPrefix = SlashSeparator + madmin.AdminAPIVersion
+	adminPathPrefix         = minioReservedBucketPath + "/admin"
+	adminAPIVersionV2       = madmin.AdminAPIVersionV2
+	adminAPIVersion         = madmin.AdminAPIVersion
+	adminAPIVersionPrefix   = SlashSeparator + adminAPIVersion
+	adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
)
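With both prefixes defined, the router below registers every endpoint once per version, so /minio/admin/v3/... and /minio/admin/v2/... share handler code and only diverge where v2 compatibility shims (InfoCannedPolicyV2, ListCannedPoliciesV2) are needed. A stripped-down sketch of that loop with gorilla/mux; the prefixes and paths here are placeholders, not the real constants:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	// Placeholders standing in for adminAPIVersionPrefix and
	// adminAPIVersionV2Prefix.
	versions := []string{"/v3", "/v2"}

	info := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "info")
	}

	for _, v := range versions {
		// Shared across versions.
		router.Methods(http.MethodGet).Path(v + "/info").HandlerFunc(info)

		// Version-specific shim, mirroring the InfoCannedPolicyV2 split.
		if v == "/v2" {
			router.Methods(http.MethodGet).Path(v + "/legacy").HandlerFunc(info)
		}
	}

	_ = http.ListenAndServe(":8080", router)
}
```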

// adminAPIHandlers provides HTTP handlers for MinIO admin API.
type adminAPIHandlers struct{}

// registerAdminRouter - Add handler functions for each service REST API routes.
-func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
+func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enableBucketQuotaOps bool) {

	adminAPI := adminAPIHandlers{}
	// Admin router
@@ -41,125 +43,178 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

	/// Service operations

-	// Restart and stop MinIO service.
-	adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
-	// Update MinIO servers.
-	adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
-
-	// Info operations
-	adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
-	// Hardware Info operations
-	adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")
-
-	// StorageInfo operations
-	adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
-	// DataUsageInfo operations
-	adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
-
-	if globalIsDistXL || globalIsXL {
-		/// Heal operations
-
-		// Heal processing endpoint.
-		adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
-		adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
-		adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
-
-		adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
-
-		/// Health operations
-
-	}
-	// Performance command - return performance details based on input type
-	adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
-
-	// Profiling operations
-	adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
-		Queries("profilerType", "{profilerType:.*}")
-	adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
-
-	// Config KV operations.
-	if enableConfigOps {
-		adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
-		adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
-		adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
-		adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
-		adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
-		adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
-		adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
+	adminVersions := []string{
+		adminAPIVersionPrefix,
+		adminAPIVersionV2Prefix,
	}

-	/// Config operations
-	if enableConfigOps {
-		// Get config
-		adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
-		// Set config
-		adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
+	for _, adminVersion := range adminVersions {
+		// Restart and stop MinIO service.
+		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
+		// Update MinIO servers.
+		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
+
+		// Info operations
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
+
+		// StorageInfo operations
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
+		// DataUsageInfo operations
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
+
+		if globalIsDistXL || globalIsXL {
+			/// Heal operations
+
+			// Heal processing endpoint.
+			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
+			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
+			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
+
+			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
+
+			/// Health operations
+
+		}
+
+		// Profiling operations
+		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
+			Queries("profilerType", "{profilerType:.*}")
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
+
+		// Config KV operations.
+		if enableConfigOps {
+			adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
+			adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
+			adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
		}

+		// Enable config help in all modes.
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
+
+		// Config KV history operations.
+		if enableConfigOps {
+			adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
+			adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
+			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
+		}
+
+		/// Config import/export bulk operations
+		if enableConfigOps {
+			// Get config
+			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
+			// Set config
+			adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
+		}
+
+		if enableIAMOps {
+			// -- IAM APIs --
+
+			// Add policy IAM
+			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
+
+			// Add user IAM
+
+			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
+
+			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
+
+			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
+
+			// Service accounts ops
+			adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
+			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
+			adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
+
+			if adminVersion == adminAPIVersionV2Prefix {
+				// Info policy IAM v2
+				adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")
+
+				// List policies v2
+				adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
+			} else {
+				// Info policy IAM latest
+				adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// List policies latest
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// Remove policy IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Set user or group policy
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
|
||||
// Remove user IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// User info
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Add/Remove members from group
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
|
||||
// Get Group
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
|
||||
// List Groups
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
|
||||
// Set Group Status
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
}
|
||||
|
||||
// Quota operations
|
||||
if enableConfigOps && enableBucketQuotaOps {
|
||||
// GetBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// PutBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// RemoveBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.RemoveBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
}
|
||||
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistXL {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
|
||||
}
|
||||
|
||||
// HTTP Trace
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)
|
||||
|
||||
// Console Logs
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
|
||||
|
||||
// -- KMS APIs --
|
||||
//
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
if !globalIsGateway {
|
||||
// -- OBD API --
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
|
||||
Queries("perfdrive", "{perfdrive:true|false}",
|
||||
"perfnet", "{perfnet:true|false}",
|
||||
"minioinfo", "{minioinfo:true|false}",
|
||||
"minioconfig", "{minioconfig:true|false}",
|
||||
"syscpu", "{syscpu:true|false}",
|
||||
"sysdiskhw", "{sysdiskhw:true|false}",
|
||||
"sysosinfo", "{sysosinfo:true|false}",
|
||||
"sysmem", "{sysmem:true|false}",
|
||||
"sysprocess", "{sysprocess:true|false}",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if enableIAMOps {
|
||||
// -- IAM APIs --
|
||||
|
||||
// Add policy IAM
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
|
||||
"{name:.*}")
|
||||
|
||||
// Add user IAM
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
|
||||
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// Info policy IAM
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Remove policy IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Set user or group policy
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
|
||||
// Remove user IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// User info
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Add/Remove members from group
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
|
||||
// Get Group
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
|
||||
// List Groups
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
|
||||
// Set Group Status
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// List policies
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistXL {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
|
||||
}
|
||||
|
||||
// HTTP Trace
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/trace").HandlerFunc(adminAPI.TraceHandler)
|
||||
|
||||
// Console Logs
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
|
||||
|
||||
// -- KMS APIs --
|
||||
//
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
|
||||
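Note on the registrations above: these are gorilla/mux routes, and a .Queries("k", "{k:pattern}") matcher means a route only fires when that query parameter is present and matches; the captured value is exposed through mux.Vars. A minimal, self-contained sketch of that mechanic (illustrative only, not MinIO code; the path, port and handler body are made up):

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	// Mirrors the ServiceHandler registration above: fires only for
	// POST /admin/v3/service?action=..., otherwise the route is skipped
	// and the request falls through to the NotFoundHandler.
	router.Methods(http.MethodPost).
		Path("/admin/v3/service").
		Queries("action", "{action:.*}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "service action: %s\n", mux.Vars(r)["action"])
		})
	http.ListenAndServe(":9000", router)
}

For example, POST /admin/v3/service?action=restart reaches the handler, while the same request without the query string gets the error response handler configured above.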
@@ -17,224 +17,59 @@
package cmd

import (
"net"
"net/http"
"os"
"strings"

"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/cpu"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/mem"

cpuhw "github.com/shirou/gopsutil/cpu"
)

// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
var memUsages []mem.Usage
var historicUsages []mem.Usage
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)

// Only proceed for local endpoints
if endpoint.IsLocal {
memUsages = append(memUsages, mem.GetUsage())
historicUsages = append(historicUsages, mem.GetHistoricUsage())
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return ServerMemUsageInfo{
Addr: addr,
Usage: memUsages,
HistoricUsage: historicUsages,
}
}

// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
var cpuLoads []cpu.Load
var historicLoads []cpu.Load
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)

// Only proceed for local endpoints
if endpoint.IsLocal {
cpuLoads = append(cpuLoads, cpu.GetLoad())
historicLoads = append(historicLoads, cpu.GetHistoricLoad())
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return ServerCPULoadInfo{
Addr: addr,
Load: cpuLoads,
HistoricLoad: historicLoads,
}
}

// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
var dps []disk.Performance
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, err := os.Stat(endpoint.Path); err != nil {
// Since this drive is not available, add relevant details and proceed
dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
continue
}
dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
dp.Path = endpoint.Path
dps = append(dps, dp)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerDrivesPerfInfo{
Addr: addr,
Perf: dps,
}
}

// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
var cpuHardwares []cpuhw.InfoStat
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
// Add to the list of visited hosts
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
cpuHardware, err := cpuhw.Info()
if err != nil {
return madmin.ServerCPUHardwareInfo{
Error: err.Error(),
}
}
cpuHardwares = append(cpuHardwares, cpuHardware...)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}

return madmin.ServerCPUHardwareInfo{
Addr: addr,
CPUInfo: cpuHardwares,
}
}

// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
var networkHardwares []net.Interface
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
// Add to the list of visited hosts
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
networkHardware, err := net.Interfaces()
if err != nil {
return madmin.ServerNetworkHardwareInfo{
Error: err.Error(),
}
}
networkHardwares = append(networkHardwares, networkHardware...)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}

return madmin.ServerNetworkHardwareInfo{
Addr: addr,
NetworkInfo: networkHardwares,
}
}

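All of the collectors above share one idiom: a set.StringSet of already-visited hosts guarantees that metrics are gathered once per host even when a host contributes several endpoints. A compact, runnable sketch of just that idiom (the host list is hypothetical):

package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	// node1 appears twice, e.g. because it exports two drives.
	endpointHosts := []string{"node1:9000", "node1:9000", "node2:9000"}
	seenHosts := set.NewStringSet()
	for _, host := range endpointHosts {
		if seenHosts.Contains(host) {
			continue // metrics for this host were already collected
		}
		seenHosts.Add(host)
		fmt.Println("collecting once for", host)
	}
}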
// getLocalServerProperty - returns ServerDrivesPerfInfo for only the
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var di madmin.Disk
var disks []madmin.Disk
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
network := make(map[string]string)
hosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {

url := strings.Replace(endpoint.URL.String(), endpoint.Path, "", -1)
if url == "" {
url = r.Host
nodeName := endpoint.Host
if nodeName == "" {
nodeName = r.Host
}
hosts.Add(url)

// Only proceed for local endpoints
if endpoint.IsLocal {
url = fetchAddress(url)
network[url] = "online"
if _, err := os.Stat(endpoint.Path); err != nil {
continue
// Only proceed for local endpoints
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
} else {
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
}

diInfo, _ := disk.GetInfo(endpoint.Path)
di.State = "ok"
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
disks = append(disks, di)
}
}
}

for host := range hosts {
_, present := network[host]
if !present {
err := checkConnection(host)
host = fetchAddress(host)
if err != nil {
network[host] = "offline"
} else {
network[host] = "online"
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
}
}
}
}
@@ -249,13 +84,3 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
Disks: disks,
}
}

// Replaces http and https from address
func fetchAddress(address string) string {
if strings.Contains(address, "http://") {
address = strings.Replace(address, "http://", "", -1)
} else if strings.Contains(address, "https://") {
address = strings.Replace(address, "https://", "", -1)
}
return address
}

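fetchAddress above only strips a leading scheme so that keys in the network map compare consistently; anything without a scheme passes through unchanged. A runnable sketch around the same function body, with expected outputs in comments:

package main

import (
	"fmt"
	"strings"
)

// Same body as the helper above: drop a leading http:// or https://.
func fetchAddress(address string) string {
	if strings.Contains(address, "http://") {
		address = strings.Replace(address, "http://", "", -1)
	} else if strings.Contains(address, "https://") {
		address = strings.Replace(address, "https://", "", -1)
	}
	return address
}

func main() {
	fmt.Println(fetchAddress("https://10.0.0.1:9000")) // 10.0.0.1:9000
	fmt.Println(fetchAddress("http://minio.local"))    // minio.local
	fmt.Println(fetchAddress("10.0.0.2:9000"))         // unchanged
}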
@@ -21,17 +21,21 @@ import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strings"

"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"google.golang.org/api/googleapi"

minio "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
)
@@ -90,9 +94,12 @@ const (
ErrMissingContentLength
ErrMissingContentMD5
ErrMissingRequestBodyError
ErrMissingSecurityHeader
ErrNoSuchBucket
ErrNoSuchBucketPolicy
ErrNoSuchBucketLifecycle
ErrNoSuchLifecycleConfiguration
ErrNoSuchBucketSSEConfig
ErrNoSuchKey
ErrNoSuchUpload
ErrNoSuchVersion
@@ -114,7 +121,8 @@ const (
ErrMissingCredTag
ErrCredMalformed
ErrInvalidRegion
ErrInvalidService
ErrInvalidServiceS3
ErrInvalidServiceSTS
ErrInvalidRequestVersion
ErrMissingSignTag
ErrMissingSignHeadersTag
@@ -142,11 +150,16 @@ const (
ErrBadRequest
ErrKeyTooLongError
ErrInvalidBucketObjectLockConfiguration
ErrObjectLockConfigurationNotFound
ErrObjectLockConfigurationNotAllowed
ErrNoSuchObjectLockConfiguration
ErrObjectLocked
ErrInvalidRetentionDate
ErrPastObjectLockRetainDate
ErrUnknownWORMModeDirective
ErrBucketTaggingNotFound
ErrObjectLockInvalidHeaders
ErrInvalidTagDirective
// Add new error codes here.

// SSE-S3 related API errors
@@ -199,6 +212,7 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrOperationMaxedOut
ErrInvalidRequest
// MinIO storage class error codes
ErrInvalidStorageClass
@@ -222,6 +236,10 @@ const (
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled

ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -321,19 +339,28 @@ const (
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)

type errorCodeMap map[APIErrorCode]APIError

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
apiErr, ok := e[errCode]
if !ok {
return e[ErrInternalError]
apiErr = e[ErrInternalError]
}
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
return apiErr
}

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
return e.ToAPIErrWithErr(errCode, nil)
}

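The refactor above turns ToAPIErr into a thin wrapper over ToAPIErrWithErr, which appends the underlying cause to the canned description as "Description (cause)". A self-contained sketch with simplified stand-in types (illustrative only; the real APIError also carries an HTTP status code, and many more codes exist):

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the types above, for illustration only.
type APIErrorCode int

type APIError struct {
	Code        string
	Description string
}

const (
	ErrInternalError APIErrorCode = iota
	ErrMalformedXML
)

type errorCodeMap map[APIErrorCode]APIError

// Same shape as the method above: unknown codes fall back to
// ErrInternalError, and a non-nil cause is appended in parentheses.
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
	apiErr, ok := e[errCode]
	if !ok {
		apiErr = e[ErrInternalError]
	}
	if err != nil {
		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
	}
	return apiErr
}

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
	return e.ToAPIErrWithErr(errCode, nil)
}

func main() {
	codes := errorCodeMap{
		ErrInternalError: {Code: "InternalError", Description: "We encountered an internal error."},
		ErrMalformedXML:  {Code: "MalformedXML", Description: "The XML you provided was not well-formed."},
	}
	e := codes.ToAPIErrWithErr(ErrMalformedXML, errors.New("unexpected EOF"))
	fmt.Println(e.Code + ": " + e.Description)
	// MalformedXML: The XML you provided was not well-formed. (unexpected EOF)
}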
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodes = errorCodeMap{
@@ -462,6 +489,11 @@ var errorCodes = errorCodeMap{
Description: "Missing required header for this request: Content-Md5.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingSecurityHeader: {
Code: "MissingSecurityHeader",
Description: "Your request was missing a required header",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingRequestBodyError: {
Code: "MissingRequestBodyError",
Description: "Request body is empty.",
@@ -482,6 +514,16 @@ var errorCodes = errorCodeMap{
Description: "The bucket lifecycle configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchLifecycleConfiguration: {
Code: "NoSuchLifecycleConfiguration",
Description: "The lifecycle configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchBucketSSEConfig: {
Code: "ServerSideEncryptionConfigurationNotFoundError",
Description: "The server side encryption configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchKey: {
Code: "NoSuchKey",
Description: "The specified key does not exist.",
@@ -626,9 +668,14 @@ var errorCodes = errorCodeMap{
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidService: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidServiceSTS: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
@@ -731,6 +778,26 @@ var errorCodes = errorCodeMap{
Description: "Bucket is missing ObjectLockConfiguration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketTaggingNotFound: {
Code: "NoSuchTagSet",
Description: "The TagSet does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrObjectLockConfigurationNotFound: {
Code: "ObjectLockConfigurationNotFoundError",
Description: "Object Lock configuration does not exist for this bucket",
HTTPStatusCode: http.StatusNotFound,
},
ErrObjectLockConfigurationNotAllowed: {
Code: "InvalidBucketState",
Description: "Object Lock configuration cannot be enabled on existing buckets",
HTTPStatusCode: http.StatusConflict,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectLocked: {
Code: "InvalidRequest",
Description: "Object is WORM protected and cannot be overwritten",
@@ -822,6 +889,11 @@ var errorCodes = errorCodeMap{
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagDirective: {
Code: "InvalidArgument",
Description: "Unknown tag directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncryptionMethod: {
Code: "InvalidRequest",
Description: "The encryption method specified is not supported",
@@ -1027,6 +1099,21 @@ var errorCodes = errorCodeMap{
Description: "Credentials in config mismatch with server environment variables",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrAdminBucketQuotaExceeded: {
Code: "XMinioAdminBucketQuotaExceeded",
Description: "Bucket quota exceeded",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchQuotaConfiguration: {
Code: "XMinioAdminNoSuchQuotaConfiguration",
Description: "The quota configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminBucketQuotaDisabled: {
Code: "XMinioAdminBucketQuotaDisabled",
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureClientRequest: {
Code: "XMinioInsecureClientRequest",
Description: "Cannot respond to plain-text request from TLS-encrypted server",
@@ -1037,6 +1124,11 @@ var errorCodes = errorCodeMap{
Description: "A timeout occurred while trying to lock a resource",
HTTPStatusCode: http.StatusRequestTimeout,
},
ErrOperationMaxedOut: {
Code: "XMinioServerTimedOut",
Description: "A timeout exceeded while waiting to proceed with the request",
HTTPStatusCode: http.StatusRequestTimeout,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
Description: "Your metadata headers are not supported.",
@@ -1532,6 +1624,16 @@ var errorCodes = errorCodeMap{
Description: "User is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusConflict,
},
ErrAdminAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The administrator key is not eligible for this operation",
HTTPStatusCode: http.StatusConflict,
},
ErrServiceAccountNotFound: {
Code: "XMinioInvalidIAMCredentials",
Description: "The specified service account is not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrPostPolicyConditionInvalidFormat: {
Code: "PostPolicyInvalidKeyName",
Description: "Invalid according to Policy: Policy Condition failed",
@@ -1547,7 +1649,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
if err == nil {
return ErrNone
}
// Verify if the underlying error is signature mismatch.

switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
@@ -1600,18 +1702,20 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrKMSNotConfigured
case crypto.ErrKMSAuthLogin:
apiErr = ErrKMSAuthFailure
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
case context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
case errDiskNotFound:
apiErr = ErrSlowDown
case errInvalidRetentionDate:
case objectlock.ErrInvalidRetentionDate:
apiErr = ErrInvalidRetentionDate
case errPastObjectLockRetainDate:
case objectlock.ErrPastObjectLockRetainDate:
apiErr = ErrPastObjectLockRetainDate
case errUnknownWORMModeDirective:
case objectlock.ErrUnknownWORMModeDirective:
apiErr = ErrUnknownWORMModeDirective
case errObjectLockInvalidHeaders:
case objectlock.ErrObjectLockInvalidHeaders:
apiErr = ErrObjectLockInvalidHeaders
case objectlock.ErrMalformedXML:
apiErr = ErrMalformedXML
}

// Compression errors
@@ -1701,7 +1805,17 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case BucketPolicyNotFound:
apiErr = ErrNoSuchBucketPolicy
case BucketLifecycleNotFound:
apiErr = ErrNoSuchBucketLifecycle
apiErr = ErrNoSuchLifecycleConfiguration
case BucketSSEConfigNotFound:
apiErr = ErrNoSuchBucketSSEConfig
case BucketTaggingNotFound:
apiErr = ErrBucketTaggingNotFound
case BucketObjectLockConfigNotFound:
apiErr = ErrObjectLockConfigurationNotFound
case BucketQuotaConfigNotFound:
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:
apiErr = ErrEventNotification
case *event.ErrInvalidARN:
@@ -1724,10 +1838,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrOverlappingFilterNotification
case *event.ErrUnsupportedConfiguration:
apiErr = ErrUnsupportedNotification
case OperationTimedOut:
apiErr = ErrOperationTimedOut
case BackendDown:
apiErr = ErrBackendDown
case crypto.Error:
apiErr = ErrObjectTampered
case ObjectNameTooLong:
apiErr = ErrKeyTooLongError
default:
@@ -1772,9 +1886,41 @@ func toAPIError(ctx context.Context, err error) APIError {
// their internal error types. This code is only
// useful with gateway implementations.
switch e := err.(type) {
case *xml.SyntaxError:
apiErr = APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description,
e.Error()),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
case url.EscapeError:
apiErr = APIError{
Code: "XMinioInvalidObjectName",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrInvalidObjectName].Description,
e.Error()),
HTTPStatusCode: http.StatusBadRequest,
}
case lifecycle.Error:
apiErr = APIError{
Code: "InvalidRequest",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case tags.Error:
apiErr = APIError{
Code: e.Code(),
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case policy.Error:
apiErr = APIError{
Code: "MalformedPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case crypto.Error:
apiErr = APIError{
Code: "XKMSInternalError",
Code: "XMinIOEncryptionError",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
@@ -1802,12 +1948,6 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Error(),
HTTPStatusCode: e.Response().StatusCode,
}
case oss.ServiceError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
// Add more Gateway SDKs here if any in future.
}
}

@@ -22,7 +22,9 @@ import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"

"github.com/minio/minio/cmd/crypto"
@@ -80,6 +82,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
}

if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
w.Header().Set(xhttp.AmzMpPartsCount, strconv.Itoa(len(objInfo.Parts)))
}

if objInfo.ContentType != "" {
w.Header().Set(xhttp.ContentType, objInfo.ContentType)
}
@@ -92,6 +98,18 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
}

if globalCacheConfig.Enabled {
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
}

// Set tag count if object has tags
tags, _ := url.ParseQuery(objInfo.UserTags)
tagCount := len(tags)
if tagCount != 0 {
w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
}

// Set all other user defined metadata.
for k, v := range objInfo.UserDefined {
if HasPrefix(k, ReservedMetadataPrefix) {

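The new tag-count header above works because object tags are stored as a URL-encoded query string in objInfo.UserTags, so url.ParseQuery both decodes and counts them. A small runnable sketch (the tag values are made up, and the literal header name is whatever xhttp.AmzTagCount resolves to; "x-amz-tagging-count" is assumed here from the S3 convention):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Tags stored the way the handler above assumes: URL-encoded pairs.
	userTags := "project=alpha&owner=storage-team"
	tags, _ := url.ParseQuery(userTags) // the handler above also discards this error
	if tagCount := len(tags); tagCount != 0 {
		fmt.Printf("x-amz-tagging-count: %d\n", tagCount) // prints 2
	}
}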
@@ -429,7 +429,12 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}

content.Owner = owner
content.VersionID = "null"
content.IsLatest = true
@@ -475,7 +480,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
contents = append(contents, content)
}
@@ -521,7 +530,11 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
if metadata {
content.UserMetadata = make(StringMap)
@@ -588,7 +601,8 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
Location: location,
Bucket: bucket,
Key: key,
ETag: etag,
// AWS S3 quotes the ETag in XML, make sure we are compatible here.
ETag: "\"" + etag + "\"",
}
}

@@ -753,17 +767,6 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeVersionMismatchResponse - writes custom error responses for version mismatches.
func writeVersionMismatchResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, isJSON bool) {
if isJSON {
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.String(), w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
writeResponse(w, err.HTTPStatusCode, encodeResponseJSON(errorResponse), mimeJSON)
} else {
writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
}
}

// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)

@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -90,123 +90,200 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
for _, bucket := range routers {
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler)))
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler))).Queries("uploads", "")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler))).Queries("acl", "")
// GetObjectTagging - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectretention", httpTraceHdrs(api.GetObjectRetentionHandler))).Queries("retention", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectretention", httpTraceHdrs(api.PutObjectRetentionHandler))).Queries("retention", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectlegalhold", httpTraceHdrs(api.PutObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectlegalhold", httpTraceHdrs(api.GetObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")

// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))

/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")

// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler))).Queries("cors", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler))).Queries("accelerate", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler))).Queries("requestPayment", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler))).Queries("logging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler))).Queries("replication", "")
// GetBucketTaggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
//DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")

// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler))).Queries("list-type", "2", "metadata", "true")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")

// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")

// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler)))
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler)))
bucket.Methods(http.MethodHead).HandlerFunc(
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler)))
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
// DeleteBucketPolicy
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
// DeleteBucketLifecycle
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// DeleteBucketEncryption
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
}
|
||||
|
||||
/// Root operation
|
||||
|
||||
// ListBuckets
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler)))
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
|
||||
|
||||
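The recurring change in this hunk wraps every route handler in maxClients. The maxClients implementation itself is not part of this excerpt; the following is a minimal sketch of such a concurrency-limiting wrapper, assuming a buffered-channel semaphore (the function name, limit, and main are illustrative, not MinIO's actual code):

```go
package main

import (
	"fmt"
	"net/http"
)

// maxClientsSketch caps concurrent in-flight requests using a buffered
// channel as a counting semaphore; callers beyond the limit block
// until a slot frees up.
func maxClientsSketch(limit int, f http.HandlerFunc) http.HandlerFunc {
	sem := make(chan struct{}, limit)
	return func(w http.ResponseWriter, r *http.Request) {
		sem <- struct{}{}        // acquire (blocks when limit reached)
		defer func() { <-sem }() // release when the handler returns
		f(w, r)
	}
}

func main() {
	hello := func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "ok") }
	http.Handle("/", maxClientsSketch(100, hello))
	http.ListenAndServe(":8080", nil)
}
```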
@@ -26,15 +26,18 @@ import (
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"

jwtgo "github.com/dgrijalva/jwt-go"
xhttp "github.com/minio/minio/cmd/http"
xjwt "github.com/minio/minio/cmd/jwt"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/policy"
)
// Verify if request has JWT.
@@ -118,9 +121,7 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown
}

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
@@ -129,7 +130,7 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, s3Err
return cred, nil, owner, s3Err
}

// we only support V4 (no presign) with auth body
@@ -139,14 +140,24 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
}

var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}

return cred, claims, owner, ErrNone
}

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -173,18 +184,18 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r)
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
claims := make(map[string]interface{})
token := getSessionToken(r)
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims, nil
return claims.Map(), nil
}
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {

stsTokenCallback := func(claims *xjwt.MapClaims) ([]byte, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
@@ -195,66 +206,43 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
// on the client side and is treated like an opaque value.
return []byte(globalActiveCred.SecretKey), nil
}
p := &jwtgo.Parser{
ValidMethods: []string{
jwtgo.SigningMethodHS256.Alg(),
jwtgo.SigningMethodHS512.Alg(),
},
}
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
if err != nil {
return nil, err
}
if !jtoken.Valid {

if err := xjwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
return nil, errAuthentication
}
v, ok := claims["accessKey"]
if !ok {
return nil, errInvalidAccessKeyID
}
if _, ok = v.(string); !ok {
return nil, errInvalidAccessKeyID
}

if globalPolicyOPA == nil {
// If OPA is not set and if ldap claim key is set,
// allow the claim.
if _, ok := claims[ldapUser]; ok {
return claims, nil
// If OPA is not set and if ldap claim key is set, allow the claim.
if _, ok := claims.Lookup(ldapUser); ok {
return claims.Map(), nil
}

// If OPA is not set, session token should
// have a policy and its mandatory, reject
// requests without policy claim.
p, pok := claims[iamPolicyName()]
if !pok {
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
if !pokOpenID && !pokSA {
return nil, errAuthentication
}
if _, pok = p.(string); !pok {
return nil, errAuthentication
}
sp, spok := claims[iampolicy.SessionPolicyName]
// Sub policy is optional, if not set return success.

sp, spok := claims.Lookup(iampolicy.SessionPolicyName)
if !spok {
return claims, nil
}
// Sub policy is set but its not a string, reject such requests
spStr, spok := sp.(string)
if !spok {
return nil, errAuthentication
return claims.Map(), nil
}
// Looks like subpolicy is set and is a string, if set then its
// base64 encoded, decode it. Decoding fails reject such requests.
spBytes, err := base64.StdEncoding.DecodeString(spStr)
spBytes, err := base64.StdEncoding.DecodeString(sp)
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(context.Background(), err, logger.Application)
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims[iampolicy.SessionPolicyName] = string(spBytes)
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
}
return claims, nil

return claims.Map(), nil
}
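The rewrite above swaps a hand-rolled jwtgo.Parser for the xjwt helpers. Using only the calls visible in this hunk (NewMapClaims, ParseWithClaims, Lookup, Map), a caller-side sketch of the resulting flow might look like this; the function name is illustrative:

```go
// Sketch (in-tree): parse an STS session token into a claims map with
// the xjwt calls shown above. The "accessKey" claim name and the error
// values mirror the surrounding code.
func parseSessionTokenSketch(token string, secret []byte) (map[string]interface{}, error) {
	claims := xjwt.NewMapClaims()
	keyFn := func(*xjwt.MapClaims) ([]byte, error) { return secret, nil }
	if err := xjwt.ParseWithClaims(token, claims, keyFn); err != nil {
		return nil, errAuthentication // bad signature or malformed token
	}
	if _, ok := claims.Lookup("accessKey"); !ok {
		return nil, errInvalidAccessKeyID // token carries no access key
	}
	return claims.Map(), nil
}
```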
// Fetch claims in the security token returned by the client and validate the token.
@@ -263,12 +251,15 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r)
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(context.Background(), err)
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}
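The subtle.ConstantTimeCompare call above is deliberate: comparing secrets with == or bytes.Equal can leak, through response timing, how many leading bytes matched. A self-contained illustration using only the standard library:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	token := []byte("presented-session-token")
	expected := []byte("stored-session-token")
	// ConstantTimeCompare returns 1 only on an exact match and runs in
	// time independent of where the first differing byte occurs, so an
	// attacker cannot probe the token byte-by-byte via timing.
	if subtle.ConstantTimeCompare(token, expected) == 1 {
		fmt.Println("token accepted")
	} else {
		fmt.Println("token rejected")
	}
}
```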
@@ -293,7 +284,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var cred auth.Credentials
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrAccessDenied
return accessKey, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
@@ -355,7 +346,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return accessKey, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
@@ -369,7 +360,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return accessKey, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}

// Verify if request has valid AWS Signature Version '2'.
@@ -402,12 +393,11 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
err error
contentMD5, contentSHA256 []byte
)

// Extract 'Content-Md5' if present.
if _, ok := r.Header[xhttp.ContentMD5]; ok {
contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get(xhttp.ContentMD5))
if err != nil || len(contentMD5) == 0 {
return ErrInvalidDigest
}
contentMD5, err = checkValidMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}

// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
@@ -433,7 +423,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
if err != nil {
return toAPIErrorCode(ctx, err)
}
r.Body = ioutil.NopCloser(reader)
r.Body = reader
return ErrNone
}

@@ -483,7 +473,107 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, nil, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}

claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}

return cred, owner, claims, ErrNone
}
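validateSignature consolidates the V2/V4, signed/presigned paths into one entry point that also returns the token claims. A hedged sketch of how a handler might drive it; the helper name and the IAM action are illustrative, not code from this change:

```go
// Sketch (in-tree): a handler front-door built on the consolidated
// validateSignature entry point, feeding its outputs into the IAM check.
func authorizeSketch(w http.ResponseWriter, r *http.Request, bucket string) bool {
	cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
	if s3Err != ErrNone {
		writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return false
	}
	return globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Action:          iampolicy.GetObjectAction, // illustrative action
		BucketName:      bucket,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
	})
}
```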
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
conditions := getConditionValues(r, "", "", nil)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
})
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.PutObjectRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}

conditions := getConditionValues(r, "", cred.AccessKey, claims)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
IsOwner: owner,
Claims: claims,
})
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}

// isPutActionAllowed - check if PUT operation is allowed on the resource, this
@@ -494,7 +584,7 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
var owner bool
switch atype {
case authTypeUnknown:
return ErrAccessDenied
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
@@ -510,10 +600,19 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
return s3Err
}

// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set
if action == iampolicy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}

if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
@@ -391,6 +391,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
}
}

func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -425,3 +426,48 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
}
}

func TestValidateAdminSignature(t *testing.T) {

ctx := context.Background()

objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)

if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable to initialize config file, %s", err)
}

creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable to create credential, %s", err)
}
globalActiveCred = creds

testCases := []struct {
AccessKey string
SecretKey string
ErrCode APIErrorCode
}{
{"", "", ErrInvalidAccessKeyID},
{"admin", "", ErrSignatureDoesNotMatch},
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
{"", "mypassword", ErrInvalidAccessKeyID},
{"admin", "mypassword", ErrNone},
}

for i, testCase := range testCases {
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to initialize new signed http request %s", err)
}
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}
}
}

@@ -52,43 +52,49 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}

func waitForLowHTTPReq(tolerance int32) {
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at max 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// Any requests in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
}

// Wait for heal requests and process them
func (h *healRoutine) run() {
ctx := context.Background()
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
if !ok {
break
}
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at max 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// Any requests in progress, delay the heal.
for (httpServer.GetRequestCount() >= int32(globalEndpoints.Nodes())) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))

var res madmin.HealResultItem
var err error
bucket, object := urlPath2BucketObjectName(task.path)
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
res, err = bgHealDiskFormat(ctx, task.opts)
res, err = healDiskFormat(ctx, objAPI, task.opts)
case bucket != "" && object == "":
res, err = bgHealBucket(ctx, bucket, task.opts)
res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
case bucket != "" && object != "":
res, err = bgHealObject(ctx, bucket, object, task.opts)
res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
}
if task.responseCh != nil {
task.responseCh <- healResult{result: res, err: err}
if task.path != slashSeparator && task.path != nopHeal {
ObjectPathUpdated(task.path)
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
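With run() now publishing results on task.responseCh, a caller submits work and blocks on the reply. A sketch using the fields visible in this hunk; the scan-mode constant is an assumption about the madmin package, and the helper is illustrative:

```go
// Sketch (in-tree): submit a heal task to the background routine and
// wait for its outcome on the response channel.
func healOneObjectSketch(bucket, object string) error {
	respCh := make(chan healResult)
	globalBackgroundHealRoutine.queueHealTask(healTask{
		path:       bucket + slashSeparator + object,
		opts:       madmin.HealOpts{ScanMode: madmin.HealNormalScan}, // assumed constant
		responseCh: respCh,
	})
	res := <-respCh // run() publishes the result here
	return res.err
}
```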
@@ -102,45 +108,27 @@ func initHealRoutine() *healRoutine {

}

func startBackgroundHealing() {
ctx := context.Background()

var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}

func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = initHealRoutine()
go globalBackgroundHealRoutine.run()
go globalBackgroundHealRoutine.run(ctx, objAPI)

// Launch the background healer sequence to track
// background healing operations
info := objAPI.StorageInfo(ctx)
info := objAPI.StorageInfo(ctx, false)
numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
nh := newBgHealSequence(numDisks)
globalBackgroundHealState.LaunchNewHealSequence(nh)
}

func initBackgroundHealing() {
go startBackgroundHealing()
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
go startBackgroundHealing(ctx, objAPI)
}

// bgHealDiskFormat - heals format.json, return value indicates if a
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}

res, err := objectAPI.HealFormat(ctx, opts.DryRun)
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)

// return any error, ignore error returned when disks have
// already healed.
@@ -161,24 +149,3 @@ func bgHealDiskFormat(ctx context.Context, opts madmin.HealRes

return res, nil
}

// bgHealBucket - traverses and heals given bucket
func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}

return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
}

// bgHealObject - heal the given object and record result
func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
}
@@ -25,32 +25,19 @@ import (

const defaultMonitorNewDiskInterval = time.Minute * 10

func initLocalDisksAutoHeal() {
go monitorLocalDisksAndHeal()
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal() {
// Wait until the object layer is ready
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}

func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*xlZones)
if !ok {
return
}

ctx := context.Background()

var bgSeq *healSequence
var found bool

@@ -62,64 +49,76 @@ func monitorLocalDisksAndHeal() {
time.Sleep(time.Second)
}

// Perform automatic disk healing when a new one is inserted
// Perform automatic disk healing when a disk is replaced locally.
for {
time.Sleep(defaultMonitorNewDiskInterval)

localDisksInZoneHeal := make([]Endpoints, len(z.zones))
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
if len(localDisksToHeal) == 0 {

// Reformat disks only if needed.
if !healNewDisks {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
}

// Reformat disks
bgSeq.sourceCh <- SlashSeparator
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- nopHeal
// Reformat disks
bgSeq.sourceCh <- healSource{path: SlashSeparator}

var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{path: nopHeal}

var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}

// Heal all erasure sets that need
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
// Heal all erasure sets that need
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
}
}
}
}
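Stripped of the endpoint bookkeeping, the control-flow change in this hunk is the replacement of an unconditional sleep at the top of the loop with a cancellable select. The skeleton, as a sketch:

```go
// Sketch (in-tree): the cancellable polling pattern the refactor
// introduces; server shutdown cancels ctx and stops the monitor.
func monitorSkeleton(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return // clean shutdown
		case <-time.After(defaultMonitorNewDiskInterval):
			// inspect local endpoints and queue heal work here
		}
	}
}
```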
@@ -28,11 +28,6 @@ import (
humanize "github.com/dustin/go-humanize"
)

// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
return prepareTestBackend(instanceType)
}

// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@@ -40,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -81,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -135,7 +130,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -149,7 +146,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -163,7 +162,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -180,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -242,7 +243,9 @@ func generateBytesData(size int) []byte {
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -256,7 +259,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -273,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -317,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -18,7 +18,6 @@ package cmd

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -27,6 +26,14 @@ import (
"github.com/minio/minio/cmd/logger"
)

type errHashMismatch struct {
message string
}

func (err *errHashMismatch) Error() string {
return err.message
}
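The new errHashMismatch type lets callers separate bitrot corruption from transient I/O failures without matching error strings. A small in-tree sketch; the helper name is illustrative:

```go
// Sketch (in-tree): distinguish bitrot corruption from other read
// failures via a type assertion on the new error type.
func isBitrotSketch(err error) bool {
	_, ok := err.(*errHashMismatch)
	return ok // true => corrupted shard, a candidate for healing
}
```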
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
@@ -132,9 +139,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err = fmt.Errorf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))
logger.LogIf(context.Background(), err)
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))
@@ -17,7 +17,6 @@
package cmd

import (
"context"
"hash"
"io"

@@ -36,12 +35,12 @@ type wholeBitrotWriter struct {
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
return len(p), nil
@@ -70,14 +69,14 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
logger.LogIf(GlobalContext, errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)
@@ -17,7 +17,6 @@
package cmd

import (
"context"
"errors"
"hash"
"io"
@@ -72,7 +71,7 @@ func (a BitrotAlgorithm) New() hash.Hash {
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
return nil
}
}
@@ -88,7 +87,7 @@ func (a BitrotAlgorithm) Available() bool {
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
}
return name
}
@@ -29,7 +29,7 @@ import (
"time"

"github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/rest"
@@ -62,9 +62,9 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}
if s1.MinioEndpoints.Nodes() != s2.MinioEndpoints.Nodes() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.Nodes(),
s2.MinioEndpoints.Nodes())
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
}

for i, ep := range s1.MinioEndpoints {
@@ -110,7 +110,7 @@ func registerBootstrapRESTHandlers(router *mux.Router) {
httpTraceHdrs(server.VerifyHandler))
}

// client to talk to bootstrap Nodes.
// client to talk to bootstrap NEndpoints.
type bootstrapRESTClient struct {
endpoint Endpoint
restClient *rest.Client
@@ -126,7 +126,7 @@ func (client *bootstrapRESTClient) reConnect() {
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(context.Background(), method, values, body, length)
return client.callWithContext(GlobalContext, method, values, body, length)
}

// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
@@ -247,7 +247,7 @@ func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
}
}

trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout, rest.DefaultRESTTimeout)
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err
188	cmd/bucket-encryption-handlers.go	Normal file
@@ -0,0 +1,188 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"encoding/xml"
"fmt"
"io"
"net/http"

"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/policy"
)

const (
// Bucket Encryption configuration file name.
bucketSSEConfig = "bucket-encryption.xml"
)

// PutBucketEncryptionHandler - Stores given bucket encryption configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketEncryption")

defer logger.AuditLog(w, r, "PutBucketEncryption", mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

// PutBucketEncryption API requires Content-Md5
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Parse bucket encryption xml
encConfig, err := validateBucketSSEConfig(io.LimitReader(r.Body, maxBucketSSEConfigSize))
if err != nil {
apiErr := APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}

// Return error if KMS is not initialized
if GlobalKMS == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL, guessIsBrowserReq(r))
return
}

// Store the bucket encryption configuration in the object layer
if err = objAPI.SetBucketSSEConfig(ctx, bucket, encConfig); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Update the in-memory bucket encryption cache
globalBucketSSEConfigSys.Set(bucket, encConfig)

// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.SetBucketSSEConfig(ctx, bucket, encConfig)

writeSuccessResponseHeadersOnly(w)
}

// GetBucketEncryptionHandler - Returns bucket encryption configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketEncryption")

defer logger.AuditLog(w, r, "GetBucketEncryption", mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Fetch bucket encryption configuration from object layer
var encConfig *bucketsse.BucketSSEConfig
if encConfig, err = objAPI.GetBucketSSEConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

var encConfigData []byte
if encConfigData, err = xml.Marshal(encConfig); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write bucket encryption configuration to client
writeSuccessResponseXML(w, encConfigData)
}

// DeleteBucketEncryptionHandler - Removes bucket encryption configuration
func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketEncryption")

defer logger.AuditLog(w, r, "DeleteBucketEncryption", mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Delete bucket encryption config from object layer
if err = objAPI.DeleteBucketSSEConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Remove entry from the in-memory bucket encryption cache
globalBucketSSEConfigSys.Remove(bucket)

// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.RemoveBucketSSEConfig(ctx, bucket)

writeSuccessNoContent(w)
}
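For reference, the body this handler validates follows the S3 ServerSideEncryptionConfiguration schema. Below is a hedged sketch of exercising the endpoint with the standard library; the endpoint and bucket are illustrative, and a real request additionally needs a valid AWS Signature V4, which this sketch omits:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	cfg := `<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`
	// The route above matches PUT with an "encryption" query parameter.
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:9000/mybucket/?encryption=", strings.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	// The handler rejects requests without Content-Md5.
	sum := md5.Sum([]byte(cfg))
	req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))
	fmt.Println(req.Method, req.URL) // sign with SigV4, then send
}
```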
170	cmd/bucket-encryption.go	Normal file
@@ -0,0 +1,170 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"bytes"
"context"
"encoding/xml"
"errors"
"io"
"path"
"sync"

bucketsse "github.com/minio/minio/pkg/bucket/encryption"
)

// BucketSSEConfigSys - in-memory cache of bucket encryption config
type BucketSSEConfigSys struct {
sync.RWMutex
bucketSSEConfigMap map[string]*bucketsse.BucketSSEConfig
}

// NewBucketSSEConfigSys - Creates an empty in-memory bucket encryption configuration cache
func NewBucketSSEConfigSys() *BucketSSEConfigSys {
return &BucketSSEConfigSys{
bucketSSEConfigMap: make(map[string]*bucketsse.BucketSSEConfig),
}
}

// load - Loads the bucket encryption configuration for the given list of buckets
func (sys *BucketSSEConfigSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
config, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket.Name)
if err != nil {
if _, ok := err.(BucketSSEConfigNotFound); ok {
continue
}
// Quorum errors should be returned.
return err
}
sys.Set(bucket.Name, config)
}

return nil
}

// Init - Initializes in-memory bucket encryption config cache for the given list of buckets
func (sys *BucketSSEConfigSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}

// We don't cache bucket encryption config in gateway mode, nothing to do.
if globalIsGateway {
return nil
}

// Load bucket encryption config cache once during boot.
return sys.load(buckets, objAPI)
}

// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (config *bucketsse.BucketSSEConfig, ok bool) {
// We don't cache bucket encryption config in gateway mode.
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return
}

cfg, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket)
if err != nil {
return
}
return cfg, true
}

sys.Lock()
defer sys.Unlock()
config, ok = sys.bucketSSEConfigMap[bucket]
return
}

// Set - sets bucket encryption config to given bucket name.
func (sys *BucketSSEConfigSys) Set(bucket string, config *bucketsse.BucketSSEConfig) {
// We don't cache bucket encryption config in gateway mode.
if globalIsGateway {
return
}

sys.Lock()
defer sys.Unlock()
sys.bucketSSEConfigMap[bucket] = config
}

// Remove - removes bucket encryption config for given bucket.
func (sys *BucketSSEConfigSys) Remove(bucket string) {
sys.Lock()
defer sys.Unlock()

delete(sys.bucketSSEConfigMap, bucket)
}
// saveBucketSSEConfig - save bucket encryption config for given bucket.
|
||||
func saveBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *bucketsse.BucketSSEConfig) error {
|
||||
data, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Path to store bucket encryption config for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
||||
// getBucketSSEConfig - get bucket encryption config for given bucket.
|
||||
func getBucketSSEConfig(objAPI ObjectLayer, bucket string) (*bucketsse.BucketSSEConfig, error) {
|
||||
// Path to bucket-encryption.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
|
||||
configData, err := readConfig(GlobalContext, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err == errConfigNotFound {
|
||||
err = BucketSSEConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
|
||||
}
|
||||
|
||||
// removeBucketSSEConfig - removes bucket encryption config for given bucket.
|
||||
func removeBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
|
||||
// Path to bucket-encryption.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
|
||||
|
||||
if err := deleteConfig(ctx, objAPI, configFile); err != nil {
|
||||
if err == errConfigNotFound {
|
||||
return BucketSSEConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
|
||||
func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
|
||||
encConfig, err := bucketsse.ParseBucketSSEConfig(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
|
||||
return encConfig, nil
|
||||
}
|
||||
return nil, errors.New("Unsupported bucket encryption configuration")
|
||||
}
|
||||
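The new BucketSSEConfigSys above is a small boot-time cache: an RWMutex-guarded map keyed by bucket name, bypassed entirely in gateway mode. Note that its Get takes the exclusive Lock even though it only reads. A minimal, self-contained sketch of the same pattern (simplified stand-in types, not MinIO's code) that uses RLock on the read path so concurrent readers do not serialize:

package main

import (
	"fmt"
	"sync"
)

// configCache mirrors the BucketSSEConfigSys shape: a mutex-guarded
// map from bucket name to its (here: string-valued) config.
type configCache struct {
	sync.RWMutex
	m map[string]string
}

func newConfigCache() *configCache {
	return &configCache{m: make(map[string]string)}
}

// Set installs or replaces the cached config for a bucket.
func (c *configCache) Set(bucket, cfg string) {
	c.Lock()
	defer c.Unlock()
	c.m[bucket] = cfg
}

// Get uses the read lock, unlike the handler code above which takes
// the exclusive lock; both are correct, this one allows parallel reads.
func (c *configCache) Get(bucket string) (string, bool) {
	c.RLock()
	defer c.RUnlock()
	cfg, ok := c.m[bucket]
	return cfg, ok
}

// Remove drops the cached entry, matching the DELETE handler's path.
func (c *configCache) Remove(bucket string) {
	c.Lock()
	defer c.Unlock()
	delete(c.m, bucket)
}

func main() {
	cache := newConfigCache()
	cache.Set("photos", "AES256") // hypothetical bucket and value
	if cfg, ok := cache.Get("photos"); ok {
		fmt.Println("photos ->", cfg) // photos -> AES256
	}
	cache.Remove("photos")
}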
70 cmd/bucket-encryption_test.go Normal file
@@ -0,0 +1,70 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"errors"
	"testing"
)

func TestValidateBucketSSEConfig(t *testing.T) {
	testCases := []struct {
		inputXML    string
		expectedErr error
		shouldPass  bool
	}{
		// MinIO supported XML
		{
			inputXML: `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>AES256</SSEAlgorithm>
</ApplyServerSideEncryptionByDefault>
</Rule>
</ServerSideEncryptionConfiguration>`,
			expectedErr: nil,
			shouldPass:  true,
		},
		// Unsupported XML
		{
			inputXML: `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>aws:kms</SSEAlgorithm>
<KMSMasterKeyID>arn:aws:kms:us-east-1:1234/5678example</KMSMasterKeyID>
</ApplyServerSideEncryptionByDefault>
</Rule>
</ServerSideEncryptionConfiguration>`,
			expectedErr: errors.New("Unsupported bucket encryption configuration"),
			shouldPass:  false,
		},
	}

	for i, tc := range testCases {
		_, err := validateBucketSSEConfig(bytes.NewReader([]byte(tc.inputXML)))
		if tc.shouldPass && err != nil {
			t.Fatalf("Test case %d: Expected to succeed but got %s", i+1, err)
		}

		if !tc.shouldPass {
			if err == nil || err != nil && err.Error() != tc.expectedErr.Error() {
				t.Fatalf("Test case %d: Expected %s but got %s", i+1, tc.expectedErr, err)
			}
		}
	}
}
@@ -24,7 +24,7 @@ import (
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"

	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/bucket/policy"
)

// Validate all the ListObjects query arguments, returns an APIErrorCode

@@ -1,5 +1,5 @@
/*
 * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
 * MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,12 +17,10 @@
package cmd

import (
	"context"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"path"
@@ -32,23 +30,24 @@ import (
	"github.com/gorilla/mux"

	"github.com/minio/minio-go/v6/pkg/set"
	"github.com/minio/minio-go/v6/pkg/tags"
	"github.com/minio/minio/cmd/config/etcd/dns"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/handlers"
	"github.com/minio/minio/pkg/hash"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/sync/errgroup"
)

const (
	getBucketVersioningResponse       = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
	objectLockConfig                  = "object-lock.xml"
	bucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
	bucketObjectLockEnabledConfig     = `{"x-amz-bucket-object-lock-enabled":true}`
	getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
	objectLockConfig            = "object-lock.xml"
	bucketTaggingConfigFile     = "tagging.xml"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -70,83 +69,78 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
	// Get buckets in the DNS
	dnsBuckets, err := globalDNSConfig.List()
	if err != nil && err != dns.ErrNoEntriesFound {
		logger.LogIf(context.Background(), err)
		logger.LogIf(GlobalContext, err)
		return
	}

	bucketSet := set.NewStringSet()
	bucketsSet := set.NewStringSet()
	bucketsToBeUpdated := set.NewStringSet()
	bucketsInConflict := set.NewStringSet()
	for _, bucket := range buckets {
		bucketsSet.Add(bucket.Name)
		r, ok := dnsBuckets[bucket.Name]
		if !ok {
			bucketsToBeUpdated.Add(bucket.Name)
			continue
		}
		if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
			if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
				// No difference in terms of domainIPs and nothing
				// has changed so we don't change anything on the etcd.
				continue
			}
			// if domain IPs intersect then it won't be an empty set.
			// such an intersection means that bucket exists on etcd.
			// but if we do see a difference with local domain IPs with
			// hostSlice from etcd then we should update with newer
			// domainIPs, we proceed to do that here.
			bucketsToBeUpdated.Add(bucket.Name)
			continue
		}
		// No IPs seem to intersect, this means that bucket exists but has
		// different IP addresses perhaps from a different deployment.
		// bucket names are globally unique in federation at a given
		// path prefix, name collision is not allowed. We simply log
		// an error and continue.
		bucketsInConflict.Add(bucket.Name)
	}

	// Add buckets that are not registered with the DNS
	// Add/update buckets that are not registered with the DNS
	g := errgroup.WithNErrs(len(buckets))
	for index := range buckets {
		bucketSet.Add(buckets[index].Name)
	bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
	for index := range bucketsToBeUpdatedSlice {
		index := index
		g.Go(func() error {
			r, gerr := globalDNSConfig.Get(buckets[index].Name)
			if gerr != nil {
				if gerr == dns.ErrNoEntriesFound {
					return globalDNSConfig.Put(buckets[index].Name)
				}
				return gerr
			}
			if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
				if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
					// No difference in terms of domainIPs and nothing
					// has changed so we don't change anything on the etcd.
					return nil
				}
				// if domain IPs intersect then it won't be an empty set.
				// such an intersection means that bucket exists on etcd.
				// but if we do see a difference with local domain IPs with
				// hostSlice from etcd then we should update with newer
				// domainIPs, we proceed to do that here.
				return globalDNSConfig.Put(buckets[index].Name)
			}
			// No IPs seem to intersect, this means that bucket exists but has
			// different IP addresses perhaps from a different deployment.
			// bucket names are globally unique in federation at a given
			// path prefix, name collision is not allowed. We simply log
			// an error and continue.
			return fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", buckets[index].Name, globalDomainIPs.ToSlice())
			return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
		}, index)
	}

	for _, err := range g.Wait() {
		if err != nil {
			logger.LogIf(context.Background(), err)
			logger.LogIf(GlobalContext, err)
		}
	}

	g = errgroup.WithNErrs(len(dnsBuckets))
	// Remove buckets that are in DNS for this server, but aren't local
	for index := range dnsBuckets {
		index := index
		g.Go(func() error {
			// This is a local bucket that exists, so we can continue
			if bucketSet.Contains(dnsBuckets[index].Key) {
				return nil
			}

			// This is not for our server, so we can continue
			hostPort := net.JoinHostPort(dnsBuckets[index].Host, string(dnsBuckets[index].Port))
			if globalDomainIPs.Intersection(set.CreateStringSet(hostPort)).IsEmpty() {
				return nil
			}

			// We got here, so we know the bucket no longer exists,
			// but is registered in DNS to this server
			if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
				return fmt.Errorf("Failed to remove DNS entry for %s due to %w",
					dnsBuckets[index].Key, err)
			}

			return nil
		}, index)
	for _, bucket := range bucketsInConflict.ToSlice() {
		logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
	}

	for _, err := range g.Wait() {
		if err != nil {
			logger.LogIf(context.Background(), err)
	// Remove buckets that are in DNS for this server, but aren't local
	for bucket, records := range dnsBuckets {
		if bucketsSet.Contains(bucket) {
			continue
		}

		if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(records)...)).IsEmpty() {
			// This is not for our server, so we can continue
			continue
		}

		// We got here, so we know the bucket no longer exists,
		// but is registered in DNS to this server
		if err = globalDNSConfig.Delete(bucket); err != nil {
			logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
				bucket, err))
		}
	}
}
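The reconciliation above reduces to three set comparisons per bucket: no IP overlap with the etcd record means a conflict, an empty difference means nothing to change, and a non-empty difference means the DNS entry should be refreshed. A self-contained sketch of that classification with a hand-rolled string set (stringSet is a stand-in for minio-go's set.StringSet, and the IP values are made up):

package main

import "fmt"

// stringSet is a minimal stand-in for set.StringSet, just enough to
// mirror the Intersection/Difference checks used above.
type stringSet map[string]struct{}

func newSet(items ...string) stringSet {
	s := stringSet{}
	for _, it := range items {
		s[it] = struct{}{}
	}
	return s
}

// intersection keeps keys present in both sets.
func (s stringSet) intersection(o stringSet) stringSet {
	r := stringSet{}
	for k := range s {
		if _, ok := o[k]; ok {
			r[k] = struct{}{}
		}
	}
	return r
}

// difference keeps keys present in s but not in o.
func (s stringSet) difference(o stringSet) stringSet {
	r := stringSet{}
	for k := range s {
		if _, ok := o[k]; !ok {
			r[k] = struct{}{}
		}
	}
	return r
}

func main() {
	// localIPs plays the role of globalDomainIPs; etcdIPs is what the
	// DNS record for a bucket currently resolves to.
	localIPs := newSet("10.0.0.1", "10.0.0.2")
	etcdIPs := newSet("10.0.0.2", "10.0.0.3")

	switch {
	case len(localIPs.intersection(etcdIPs)) == 0:
		fmt.Println("conflict: bucket owned by a different deployment")
	case len(localIPs.difference(etcdIPs)) == 0:
		fmt.Println("up to date: nothing to change in etcd")
	default:
		fmt.Println("update: refresh the DNS entry with newer IPs")
	}
}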
@@ -271,7 +265,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
	listBuckets := objectAPI.ListBuckets

	accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
	if s3Error != ErrNone {
	if s3Error != ErrNone && s3Error != ErrAccessDenied {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}
@@ -284,16 +278,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		bucketSet := set.NewStringSet()
		for _, dnsRecord := range dnsBuckets {
			if bucketSet.Contains(dnsRecord.Key) {
				continue
			}
		for _, dnsRecords := range dnsBuckets {
			bucketsInfo = append(bucketsInfo, BucketInfo{
				Name:    dnsRecord.Key,
				Created: dnsRecord.CreationDate,
				Name:    dnsRecords[0].Key,
				Created: dnsRecords[0].CreationDate,
			})
			bucketSet.Add(dnsRecord.Key)
		}
	} else {
		// Invoke the list buckets.
@@ -305,32 +294,43 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
		}
	}

	// Set prefix value for "s3:prefix" policy conditionals.
	r.Header.Set("prefix", "")
	if s3Error == ErrAccessDenied {
		// Set prefix value for "s3:prefix" policy conditionals.
		r.Header.Set("prefix", "")

	// Set delimiter value for "s3:delimiter" policy conditionals.
	r.Header.Set("delimiter", SlashSeparator)
		// Set delimiter value for "s3:delimiter" policy conditionals.
		r.Header.Set("delimiter", SlashSeparator)

	// err will be nil here as we already called this function
	// earlier in this request.
	claims, _ := getClaimsFromToken(r)
	var newBucketsInfo []BucketInfo
	for _, bucketInfo := range bucketsInfo {
		if globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     accessKey,
			Action:          iampolicy.ListBucketAction,
			BucketName:      bucketInfo.Name,
			ConditionValues: getConditionValues(r, "", accessKey, claims),
			IsOwner:         owner,
			ObjectName:      "",
			Claims:          claims,
		}) {
			newBucketsInfo = append(newBucketsInfo, bucketInfo)
		// err will be nil here as we already called this function
		// earlier in this request.
		claims, _ := getClaimsFromToken(r, getSessionToken(r))
		n := 0
		// Use the following trick to filter in place
		// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
		for _, bucketInfo := range bucketsInfo {
			if globalIAMSys.IsAllowed(iampolicy.Args{
				AccountName:     accessKey,
				Action:          iampolicy.ListBucketAction,
				BucketName:      bucketInfo.Name,
				ConditionValues: getConditionValues(r, "", accessKey, claims),
				IsOwner:         owner,
				ObjectName:      "",
				Claims:          claims,
			}) {
				bucketsInfo[n] = bucketInfo
				n++
			}
		}
		bucketsInfo = bucketsInfo[:n]
		// No buckets were allowed; return access denied error.
		if len(bucketsInfo) == 0 {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	// Generate response.
	response := generateListBucketsResponse(newBucketsInfo)
	response := generateListBucketsResponse(bucketsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write response.
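The rewritten handler filters bucketsInfo in place instead of building a second slice, per the SliceTricks link quoted in the diff: keep a write index, copy allowed elements forward, then truncate. The idiom in isolation, as a runnable sketch with made-up bucket names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	buckets := []string{"public-logs", "private-keys", "public-site"}
	allowed := func(b string) bool { return strings.HasPrefix(b, "public-") }

	// Filter in place: reuse the backing array, no extra allocation.
	n := 0
	for _, b := range buckets {
		if allowed(b) {
			buckets[n] = b
			n++
		}
	}
	buckets = buckets[:n]

	fmt.Println(buckets) // [public-logs public-site]
}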
@@ -352,6 +352,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
		return
	}

	// Content-Md5 is required and should be set
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	// Content-Length is required and should be non-zero
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
@@ -359,34 +366,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
		return
	}

	// Content-Md5 is required and should be set
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allocate incoming content length bytes.
	var deleteXMLBytes []byte
	const maxBodySize = 2 * 1000 * 1024 // The max. XML contains 1000 object names (each at most 1024 bytes long) + XML overhead
	if r.ContentLength > maxBodySize { // Only allocated memory for at most 1000 objects
		deleteXMLBytes = make([]byte, maxBodySize)
	} else {
		deleteXMLBytes = make([]byte, r.ContentLength)
	}

	// Read incoming body XML bytes.
	if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponse(ctx, w, toAdminAPIErr(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	// The max. XML contains 100000 object names (each at most 1024 bytes long) + XML overhead
	const maxBodySize = 2 * 100000 * 1024

	// Unmarshal list of keys to be deleted.
	deleteObjects := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
	if err := xmlDecoder(r.Body, deleteObjects, maxBodySize); err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

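The old path pre-allocated a buffer from the client-supplied Content-Length and read the whole body before unmarshalling; the new path streams r.Body through an xmlDecoder helper bounded by maxBodySize. Only the helper's call site appears in this diff, so the sketch below is just one plausible shape for it, built on io.LimitReader (an oversized body then surfaces as an XML syntax error rather than an allocation):

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// xmlDecodeLimited is a guess at the shape of the repo's xmlDecoder
// helper: decode XML from body while reading at most maxSize bytes.
func xmlDecodeLimited(body io.Reader, v interface{}, maxSize int64) error {
	var r io.Reader = body
	if maxSize > 0 {
		r = io.LimitReader(body, maxSize)
	}
	return xml.NewDecoder(r).Decode(v)
}

// deleteRequest is a minimal stand-in for DeleteObjectsRequest.
type deleteRequest struct {
	XMLName xml.Name `xml:"Delete"`
	Objects []struct {
		Key string `xml:"Key"`
	} `xml:"Object"`
}

func main() {
	payload := `<Delete><Object><Key>a.txt</Key></Object><Object><Key>b.txt</Key></Object></Delete>`
	var req deleteRequest
	if err := xmlDecodeLimited(strings.NewReader(payload), &req, 2*100000*1024); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%d keys to delete\n", len(req.Objects)) // 2 keys to delete
}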
@@ -411,11 +398,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
			}
			continue
		}
		govBypassPerms := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName)
		if _, err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn, govBypassPerms); err != ErrNone {
			dErrs[index] = err
			continue

		if _, ok := globalBucketObjectLockSys.Get(bucket); ok {
			if err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); err != ErrNone {
				dErrs[index] = err
				continue
			}
		}

		// Avoid duplicate objects, we use map to filter them out.
		if _, ok := objectsToDelete[object.ObjectName]; !ok {
			objectsToDelete[object.ObjectName] = index
@@ -533,24 +523,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
	}

	if globalDNSConfig != nil {
		if _, err := globalDNSConfig.Get(bucket); err != nil {
		sr, err := globalDNSConfig.Get(bucket)
		if err != nil {
			if err == dns.ErrNoEntriesFound {
				// Proceed to creating a bucket.
				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled); err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}

				if objectLockEnabled {
					configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
					if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
						writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
						return
					}
				}

				if err = globalDNSConfig.Put(bucket); err != nil {
					objectAPI.DeleteBucket(ctx, bucket)
					objectAPI.DeleteBucket(ctx, bucket, false)
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
@@ -566,25 +549,29 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
			return

		}
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBucketAlreadyOwnedByYou), r.URL, guessIsBrowserReq(r))
		apiErr := ErrBucketAlreadyExists
		if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
			apiErr = ErrBucketAlreadyOwnedByYou
		}
		// No IPs seem to intersect, this means that bucket exists but has
		// different IP addresses perhaps from a different deployment.
		// bucket names are globally unique in federation at a given
		// path prefix, name collision is not allowed. Return appropriate error.
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
		return
	}

	// Proceed to creating a bucket.
	err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
	err := objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if objectLockEnabled {
		configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
		if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		globalBucketObjectLockConfig.Set(bucket, Retention{})
		globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, Retention{})
	if objectLockEnabled && !globalIsGateway {
		ret := &objectlock.Retention{}
		globalBucketObjectLockSys.Set(bucket, ret)
		globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, ret)
	}

	// Make sure to add Location information here only for bucket
@@ -620,7 +607,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
	bucket := mux.Vars(r)["bucket"]

	// To detect if the client has disconnected.
	r.Body = &detectDisconnect{r.Body, r.Context().Done()}
	r.Body = &contextReader{r.Body, r.Context()}

	// Require Content-Length to be set in the request
	size := r.ContentLength
@@ -756,10 +743,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
	}
	rawReader := hashReader
	pReader := NewPutObjReader(rawReader, nil, nil)
	var objectEncryptionKey []byte
	var objectEncryptionKey crypto.ObjectKey

	// Check if bucket encryption is enabled
	_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
	// This request header needs to be set prior to setting ObjectOptions
	if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
	if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) {
		r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
	}
	// get gateway encryption options
@@ -796,7 +785,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
		pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
	}
}

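The PostPolicy change widens the default-encryption trigger from the server-wide flag alone to "flag OR the bucket has an SSE config", while still deferring to client-driven SSE-C. The predicate pulled out as a sketch with plain boolean inputs (the real code reads these from server config, the bucket SSE cache, and request headers):

package main

import "fmt"

// shouldApplyDefaultSSE mirrors the condition added above:
// (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header).
func shouldApplyDefaultSSE(autoEncryption, bucketSSEEnabled, ssecRequested bool) bool {
	return (autoEncryption || bucketSSEEnabled) && !ssecRequested
}

func main() {
	fmt.Println(shouldApplyDefaultSSE(false, true, false)) // true: bucket default kicks in
	fmt.Println(shouldApplyDefaultSSE(false, true, true))  // false: client-provided SSE-C wins
	fmt.Println(shouldApplyDefaultSSE(true, false, false)) // true: server-wide auto encryption
}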
@@ -895,15 +884,39 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
	forceDelete := false
	if value := r.Header.Get(xhttp.MinIOForceDelete); value != "" {
		switch value {
		case "true":
			forceDelete = true
		case "false":
		default:
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if forceDelete {
		if s3Error := checkRequestAuthType(ctx, r, policy.ForceDeleteBucketAction, bucket, ""); s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
			return
		}
	} else {
		if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if _, ok := globalBucketObjectLockSys.Get(bucket); ok && forceDelete {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
		return
	}

	deleteBucket := objectAPI.DeleteBucket

	// Attempt to delete bucket.
	if err := deleteBucket(ctx, bucket); err != nil {
	if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
@@ -911,7 +924,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
	if globalDNSConfig != nil {
		if err := globalDNSConfig.Delete(bucket); err != nil {
			// Deleting DNS entry failed, attempt to create the bucket again.
			objectAPI.MakeBucketWithLocation(ctx, bucket, "")
			objectAPI.MakeBucketWithLocation(ctx, bucket, "", false)
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
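DeleteBucket now honors a force-delete header, rejects any value other than "true" or "false", demands a separate ForceDeleteBucketAction permission for the force path, and refuses force deletion on object-locked buckets. A sketch of just the strict header parsing (the literal header name here is an assumption matching the diff's xhttp.MinIOForceDelete constant):

package main

import (
	"fmt"
	"net/http"
)

// parseForceDelete mirrors the handler logic above: absent means
// false, "true"/"false" are accepted, anything else is rejected
// (the handler answers with ErrInvalidRequest).
func parseForceDelete(h http.Header) (bool, error) {
	switch v := h.Get("x-minio-force-delete"); v {
	case "", "false":
		return false, nil
	case "true":
		return true, nil
	default:
		return false, fmt.Errorf("invalid x-minio-force-delete value %q", v)
	}
}

func main() {
	h := http.Header{}
	h.Set("x-minio-force-delete", "true")
	force, err := parseForceDelete(h)
	fmt.Println(force, err) // true <nil>
}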
@@ -996,54 +1009,31 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
		return
	}
	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}
	config, err := parseObjectLockConfig(r.Body)

	config, err := objectlock.ParseObjectLockConfig(r.Body)
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		apiErr.Description = err.Error()
		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
	}
	configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
	configData, err := readConfig(ctx, objectAPI, configFile)
	if err != nil {
		aerr := toAPIError(ctx, err)
		if err == errConfigNotFound {
			aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
		}
		writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))

	if err = objectAPI.SetBucketObjectLockConfig(ctx, bucket, config); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if string(configData) != bucketObjectLockEnabledConfig {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
		return
	}
	data, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
	if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	if config.Rule != nil {
		retention := config.ToRetention()
		globalBucketObjectLockConfig.Set(bucket, retention)
		globalBucketObjectLockSys.Set(bucket, retention)
		globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, retention)
	} else {
		globalBucketObjectLockConfig.Remove(bucket)
		globalNotificationSys.RemoveBucketObjectLockConfig(ctx, bucket)
		globalBucketObjectLockSys.Set(bucket, &objectlock.Retention{})
		globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, &objectlock.Retention{})
	}

	// Write success response.
@@ -1073,38 +1063,125 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}
	configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
	configData, err := readConfig(ctx, objectAPI, configFile)

	lkCfg, err := objectAPI.GetBucketObjectLockConfig(ctx, bucket)
	if err != nil {
		var aerr APIError
		if err == errConfigNotFound {
			aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
		} else {
			aerr = toAPIError(ctx, err)
		}
		writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if string(configData) != bucketObjectLockEnabledConfig {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
		return
	}

	configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
	configData, err = readConfig(ctx, objectAPI, configFile)
	configData, err := xml.Marshal(lkCfg)
	if err != nil {
		if err != errConfigNotFound {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		if configData, err = xml.Marshal(newObjectLockConfig()); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write success response.
	writeSuccessResponseXML(w, configData)
}

// PutBucketTaggingHandler - PUT Bucket tagging.
// ----------
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketTagging")

	defer logger.AuditLog(w, r, "PutBucketTagging", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	tags, err := tags.ParseBucketXML(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		apiErr.Description = err.Error()
		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
	}

	if err = objectAPI.SetBucketTagging(ctx, bucket, tags); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write success response.
	writeSuccessResponseHeadersOnly(w)
}

// GetBucketTaggingHandler - GET Bucket tagging.
// ----------
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketTagging")

	defer logger.AuditLog(w, r, "GetBucketTagging", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// check if user has permissions to perform this operation
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketTaggingAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	t, err := objectAPI.GetBucketTagging(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	configData, err := xml.Marshal(t)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write success response.
	writeSuccessResponseXML(w, configData)
}

// DeleteBucketTaggingHandler - DELETE Bucket tagging.
// ----------
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucketTagging")

	defer logger.AuditLog(w, r, "DeleteBucketTagging", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	if err := objectAPI.DeleteBucketTagging(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write success response.
	writeSuccessResponseHeadersOnly(w)
}

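The tagging handlers delegate parsing to tags.ParseBucketXML from minio-go and persist the result through the object layer. A sketch of the parse step against a literal payload (the tag key and value are invented; re-marshalling with encoding/xml just shows the parsed structure survived the round trip):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/minio/minio-go/v6/pkg/tags"
)

func main() {
	// Hypothetical bucket-tagging document.
	payload := `<Tagging><TagSet><Tag><Key>project</Key><Value>alpha</Value></Tag></TagSet></Tagging>`
	t, err := tags.ParseBucketXML(strings.NewReader(payload))
	if err != nil {
		fmt.Println("invalid tagging XML:", err)
		return
	}
	out, err := xml.Marshal(t)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Println(string(out))
}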
@@ -18,7 +18,6 @@ package cmd

import (
	"bytes"
	"context"
	"encoding/xml"
	"io/ioutil"
	"net/http"
@@ -29,6 +28,51 @@ import (
	"github.com/minio/minio/pkg/auth"
)

// Wrapper for calling RemoveBucket HTTP handler tests for both XL multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
}

func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {
	_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
	// if object upload fails stop the test.
	if err != nil {
		t.Fatalf("Error uploading object: <ERROR> %v", err)
	}

	// initialize httptest Recorder, this records any mutations to response writer inside the handler.
	rec := httptest.NewRecorder()
	// construct HTTP request for DELETE bucket.
	req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
	if err != nil {
		t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
	}
	// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
	// Call the ServeHTTP to execute the handler.
	apiRouter.ServeHTTP(rec, req)
	switch rec.Code {
	case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
		t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, rec.Code)
	}

	// Verify response of the V2 signed HTTP request.
	// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
	recV2 := httptest.NewRecorder()
	// construct HTTP request for DELETE bucket.
	reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
	if err != nil {
		t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
	}
	// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
	// Call the ServeHTTP to execute the handler.
	apiRouter.ServeHTTP(recV2, reqV2)
	switch recV2.Code {
	case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
		t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, recV2.Code)
	}
}

// Wrapper for calling GetBucketLocation HTTP handler tests for both XL multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
@@ -625,7 +669,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	for i := 0; i < 10; i++ {
		objectName := "test-object-" + strconv.Itoa(i)
		// uploading the object.
		_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
		_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
		// if object upload fails stop the test.
		if err != nil {
			t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

@@ -22,9 +22,10 @@ import (
	"net/http"

	"github.com/gorilla/mux"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/lifecycle"
	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	"github.com/minio/minio/pkg/bucket/policy"
)

const (
@@ -48,6 +49,12 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	// PutBucketLifecycle always needs a Content-Md5
	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
@@ -59,15 +66,9 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

	// PutBucketLifecycle always needs a Content-Md5
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

	globalLifecycleSys.Set(bucket, *bucketLifecycle)
	globalLifecycleSys.Set(bucket, bucketLifecycle)
	globalNotificationSys.SetBucketLifecycle(ctx, bucket, bucketLifecycle)

	// Success.

303 cmd/bucket-lifecycle-handlers_test.go Normal file
@@ -0,0 +1,303 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"encoding/xml"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/minio/minio/pkg/auth"
)

// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
	ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}

// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {
	// test cases with sample input and expected output.
	testCases := []struct {
		method     string
		bucketName string
		accessKey  string
		secretKey  string
		// Sent body
		body []byte
		// Expected response
		expectedRespStatus int
		lifecycleResponse  []byte
		errorResponse      APIErrorResponse
		shouldPass         bool
	}{
		// GET empty credentials
		{
			method: "GET", bucketName: bucketName,
			accessKey:          "",
			secretKey:          "",
			expectedRespStatus: http.StatusForbidden,
			lifecycleResponse:  []byte(""),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "AccessDenied",
				Message:  "Access Denied.",
			},
			shouldPass: false,
		},
		// GET wrong credentials
		{
			method: "GET", bucketName: bucketName,
			accessKey:          "abcd",
			secretKey:          "abcd",
			expectedRespStatus: http.StatusForbidden,
			lifecycleResponse:  []byte(""),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "InvalidAccessKeyId",
				Message:  "The access key ID you provided does not exist in our records.",
			},
			shouldPass: false,
		},
		// PUT empty credentials
		{
			method:             "PUT",
			bucketName:         bucketName,
			accessKey:          "",
			secretKey:          "",
			expectedRespStatus: http.StatusForbidden,
			lifecycleResponse:  []byte(""),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "AccessDenied",
				Message:  "Access Denied.",
			},
			shouldPass: false,
		},
		// PUT wrong credentials
		{
			method:             "PUT",
			bucketName:         bucketName,
			accessKey:          "abcd",
			secretKey:          "abcd",
			expectedRespStatus: http.StatusForbidden,
			lifecycleResponse:  []byte(""),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "InvalidAccessKeyId",
				Message:  "The access key ID you provided does not exist in our records.",
			},
			shouldPass: false,
		},
		// DELETE empty credentials
		{
			method:             "DELETE",
			bucketName:         bucketName,
			accessKey:          "",
			secretKey:          "",
			expectedRespStatus: http.StatusForbidden,
			lifecycleResponse:  []byte(""),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "AccessDenied",
				Message:  "Access Denied.",
			},
			shouldPass: false,
		},
		// DELETE wrong credentials
		{
			method:             "DELETE",
			bucketName:         bucketName,
			accessKey:          "abcd",
			secretKey:          "abcd",
			expectedRespStatus: http.StatusForbidden,
			lifecycleResponse:  []byte(""),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "InvalidAccessKeyId",
				Message:  "The access key ID you provided does not exist in our records.",
			},
			shouldPass: false,
		},
	}

	testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}

// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
	ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}

// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	creds auth.Credentials, t *testing.T) {

	// test cases with sample input and expected output.
	testCases := []struct {
		method     string
		bucketName string
		accessKey  string
		secretKey  string
		// Sent body
		body []byte
		// Expected response
		expectedRespStatus int
		lifecycleResponse  []byte
		errorResponse      APIErrorResponse
		shouldPass         bool
	}{
		// Test case - 1.
		// Filter contains more than (Prefix,Tag,And) rule
		{
			method:             "PUT",
			bucketName:         bucketName,
			accessKey:          creds.AccessKey,
			secretKey:          creds.SecretKey,
			body:               []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
			expectedRespStatus: http.StatusBadRequest,
			lifecycleResponse:  []byte(``),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "InvalidRequest",
				Message:  "Filter must have exactly one of Prefix, Tag, or And specified",
			},

			shouldPass: false,
		},
		// Date contains wrong format
		{
			method:             "PUT",
			bucketName:         bucketName,
			accessKey:          creds.AccessKey,
			secretKey:          creds.SecretKey,
			body:               []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Date>365</Date></Expiration></Rule></LifecycleConfiguration>`),
			expectedRespStatus: http.StatusBadRequest,
			lifecycleResponse:  []byte(``),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "InvalidRequest",
				Message:  "Date must be provided in ISO 8601 format",
			},

			shouldPass: false,
		},
		{
			method:             "PUT",
			bucketName:         bucketName,
			accessKey:          creds.AccessKey,
			secretKey:          creds.SecretKey,
			body:               []byte(`<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
			expectedRespStatus: http.StatusOK,
			lifecycleResponse:  []byte(``),
			errorResponse:      APIErrorResponse{},
			shouldPass:         true,
		},
		{
			method:             "GET",
			accessKey:          creds.AccessKey,
			secretKey:          creds.SecretKey,
			bucketName:         bucketName,
			body:               []byte(``),
			expectedRespStatus: http.StatusOK,
			lifecycleResponse:  []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Filter><Prefix>logs/</Prefix></Filter><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
			errorResponse:      APIErrorResponse{},
			shouldPass:         true,
		},
		{
			method:             "DELETE",
			accessKey:          creds.AccessKey,
			secretKey:          creds.SecretKey,
			bucketName:         bucketName,
			body:               []byte(``),
			expectedRespStatus: http.StatusNoContent,
			lifecycleResponse:  []byte(``),
			errorResponse:      APIErrorResponse{},
			shouldPass:         true,
		},
		{
			method:             "GET",
			accessKey:          creds.AccessKey,
			secretKey:          creds.SecretKey,
			bucketName:         bucketName,
			body:               []byte(``),
			expectedRespStatus: http.StatusNotFound,
			lifecycleResponse:  []byte(``),
			errorResponse: APIErrorResponse{
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "NoSuchLifecycleConfiguration",
				Message:  "The lifecycle configuration does not exist",
			},
			shouldPass: false,
		},
	}

	testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}

// testBucketLifecycle is a generic testing of lifecycle requests
func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	t *testing.T, testCases []struct {
		method             string
		bucketName         string
		accessKey          string
		secretKey          string
		body               []byte
		expectedRespStatus int
		lifecycleResponse  []byte
		errorResponse      APIErrorResponse
		shouldPass         bool
	}) {

	for i, testCase := range testCases {
		// initialize httptest Recorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()
		// construct HTTP request
		req, err := newTestSignedRequestV4(testCase.method, getBucketLifecycleURL("", testCase.bucketName),
			int64(len(testCase.body)), bytes.NewReader(testCase.body), testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for bucket lifecycle: <ERROR> %v", i+1, instanceType, err)
		}
		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
		// Call the ServeHTTP to execute the handler.
		apiRouter.ServeHTTP(rec, req)
		if rec.Code != testCase.expectedRespStatus {
			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
		}
		if testCase.shouldPass && !bytes.Equal(testCase.lifecycleResponse, rec.Body.Bytes()) {
			t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.lifecycleResponse), rec.Body.String())
		}
		errorResponse := APIErrorResponse{}
		err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
		if err != nil && !testCase.shouldPass {
			t.Fatalf("Test %d: %s: Unable to unmarshal response body %s", i+1, instanceType, rec.Body.String())
		}
		if errorResponse.Resource != testCase.errorResponse.Resource {
			t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
		}
		if errorResponse.Message != testCase.errorResponse.Message {
			t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
		}
		if errorResponse.Code != testCase.errorResponse.Code {
			t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
		}
	}
}
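The lifecycle tests above PUT a configuration document and then assert the exact element order of the GET response. A stripped-down decode of the same document with nothing but encoding/xml, as a sketch — the real pkg/bucket/lifecycle types add validation (exactly one of Prefix, Tag, or And per Filter; ISO 8601 dates) that this omits:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// lifecycleConfig is a simplified mirror of the lifecycle document,
// not the repo's actual types.
type lifecycleConfig struct {
	XMLName xml.Name `xml:"LifecycleConfiguration"`
	Rules   []struct {
		ID     string `xml:"ID"`
		Status string `xml:"Status"`
		Filter struct {
			Prefix string `xml:"Prefix"`
		} `xml:"Filter"`
		Expiration struct {
			Days int `xml:"Days"`
		} `xml:"Expiration"`
	} `xml:"Rule"`
}

func main() {
	doc := `<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`
	var cfg lifecycleConfig
	if err := xml.NewDecoder(strings.NewReader(doc)).Decode(&cfg); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	r := cfg.Rules[0]
	// rule id: expire "logs/" after 365 days
	fmt.Printf("rule %s: expire %q after %d days\n", r.ID, r.Filter.Prefix, r.Expiration.Days)
}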
@@ -20,13 +20,10 @@ import (
    "bytes"
    "context"
    "encoding/xml"
-   "fmt"
    "path"
-   "strings"
    "sync"

-   "github.com/minio/minio/cmd/logger"
-   "github.com/minio/minio/pkg/lifecycle"
+   "github.com/minio/minio/pkg/bucket/lifecycle"
)

const (
@@ -38,11 +35,11 @@ const (
// LifecycleSys - Bucket lifecycle subsystem.
type LifecycleSys struct {
    sync.RWMutex
-   bucketLifecycleMap map[string]lifecycle.Lifecycle
+   bucketLifecycleMap map[string]*lifecycle.Lifecycle
}

// Set - sets lifecycle config to given bucket name.
-func (sys *LifecycleSys) Set(bucketName string, lifecycle lifecycle.Lifecycle) {
+func (sys *LifecycleSys) Set(bucketName string, lifecycle *lifecycle.Lifecycle) {
    if globalIsGateway {
        // no-op
        return
@@ -55,7 +52,7 @@ func (sys *LifecycleSys) Set(bucketName string, lifecycle lifecycle.Lifecycle) {
}

// Get - gets lifecycle config associated to a given bucket name.
-func (sys *LifecycleSys) Get(bucketName string) (lifecycle lifecycle.Lifecycle, ok bool) {
+func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, ok bool) {
    if globalIsGateway {
        // When gateway is enabled, no cached value
        // is used to validate life cycle policies.
@@ -63,14 +60,18 @@ func (sys *LifecycleSys) Get(bucketName string) (lifecycle lifecycle.Lifecycle,
        if objAPI == nil {
            return
        }
-       l, err := objAPI.GetBucketLifecycle(context.Background(), bucketName)
-       return *l, err == nil
+
+       l, err := objAPI.GetBucketLifecycle(GlobalContext, bucketName)
+       if err != nil {
+           return
+       }
+       return l, true
    }

    sys.Lock()
    defer sys.Unlock()

-   l, ok := sys.bucketLifecycleMap[bucketName]
-   return l, ok
+   lc, ok = sys.bucketLifecycleMap[bucketName]
+   return
}

func saveLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName string, bucketLifecycle *lifecycle.Lifecycle) error {
@@ -88,7 +89,7 @@ func saveLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName str
func getLifecycleConfig(objAPI ObjectLayer, bucketName string) (*lifecycle.Lifecycle, error) {
    // Construct path to lifecycle.xml for the given bucket.
    configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)
-   configData, err := readConfig(context.Background(), objAPI, configFile)
+   configData, err := readConfig(GlobalContext, objAPI, configFile)
    if err != nil {
        if err == errConfigNotFound {
            err = BucketLifecycleNotFound{Bucket: bucketName}
@@ -103,8 +104,8 @@ func removeLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName s
    // Construct path to lifecycle.xml for the given bucket.
    configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)

-   if err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile); err != nil {
-       if _, ok := err.(ObjectNotFound); ok {
+   if err := deleteConfig(ctx, objAPI, configFile); err != nil {
+       if err == errConfigNotFound {
            return BucketLifecycleNotFound{Bucket: bucketName}
        }
        return err
@@ -115,7 +116,7 @@ func removeLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName s
// NewLifecycleSys - creates new lifecycle system.
func NewLifecycleSys() *LifecycleSys {
    return &LifecycleSys{
-       bucketLifecycleMap: make(map[string]lifecycle.Lifecycle),
+       bucketLifecycleMap: make(map[string]*lifecycle.Lifecycle),
    }
}

@@ -131,52 +132,29 @@ func (sys *LifecycleSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
        return nil
    }

-   doneCh := make(chan struct{})
-   defer close(doneCh)
-
-   // Initializing lifecycle needs a retry mechanism for
-   // the following reasons:
-   //  - Read quorum is lost just after the initialization
-   //    of the object layer.
-   retryTimerCh := newRetryTimerSimple(doneCh)
-   for {
-       select {
-       case <-retryTimerCh:
-           // Load LifecycleSys once during boot.
-           if err := sys.load(buckets, objAPI); err != nil {
-               if err == errDiskNotFound ||
-                   strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-                   strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-                   logger.Info("Waiting for lifecycle subsystem to be initialized..")
-                   continue
-               }
-               return err
-           }
-           return nil
-       case <-globalOSSignalCh:
-           return fmt.Errorf("Initializing Lifecycle sub-system gracefully stopped")
-       }
-   }
+   // Load LifecycleSys once during boot.
+   return sys.load(buckets, objAPI)
}

// Loads lifecycle policies for all buckets into LifecycleSys.
func (sys *LifecycleSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
    for _, bucket := range buckets {
-       config, err := objAPI.GetBucketLifecycle(context.Background(), bucket.Name)
+       config, err := getLifecycleConfig(objAPI, bucket.Name)
        if err != nil {
            if _, ok := err.(BucketLifecycleNotFound); ok {
                sys.Remove(bucket.Name)
                continue
            }
-           continue
+           // Quorum errors should be returned.
+           return err
        }

-       sys.Set(bucket.Name, *config)
+       sys.Set(bucket.Name, config)
    }

    return nil
}

-// Remove - removes policy for given bucket name.
+// Remove - removes lifecycle config for given bucket name.
func (sys *LifecycleSys) Remove(bucketName string) {
    sys.Lock()
    defer sys.Unlock()
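The LifecycleSys changes above reduce the subsystem to an RWMutex-guarded map of pointers, with gateway deployments bypassing the cache and reading the backend directly. A minimal, self-contained sketch of that caching pattern follows; the names here (Lifecycle, ConfigCache) are illustrative stand-ins, not MinIO's API:

package main

import (
    "fmt"
    "sync"
)

// Lifecycle stands in for lifecycle.Lifecycle; any config type works.
type Lifecycle struct{ Rules []string }

// ConfigCache mirrors the LifecycleSys shape: an RWMutex plus a pointer
// map, so Set replaces the pointer under the write lock and Get hands
// back a shared, read-only view without copying the config.
type ConfigCache struct {
    mu sync.RWMutex
    m  map[string]*Lifecycle
}

func NewConfigCache() *ConfigCache {
    return &ConfigCache{m: make(map[string]*Lifecycle)}
}

func (c *ConfigCache) Set(bucket string, lc *Lifecycle) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.m[bucket] = lc
}

func (c *ConfigCache) Get(bucket string) (*Lifecycle, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    lc, ok := c.m[bucket]
    return lc, ok
}

func main() {
    cache := NewConfigCache()
    cache.Set("photos", &Lifecycle{Rules: []string{"expire-after-90d"}})
    if lc, ok := cache.Get("photos"); ok {
        fmt.Println(lc.Rules)
    }
}

Storing pointers rather than values is what lets the diff drop the `*config` dereferences at the call sites.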
165 cmd/bucket-meta.go Normal file
@@ -0,0 +1,165 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "encoding/binary"
    "errors"
    "fmt"
    "path"
    "time"

    "github.com/minio/minio/cmd/logger"
)

const (
    legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
    legacyBucketObjectLockEnabledConfig     = `{"x-amz-bucket-object-lock-enabled":true}`

    bucketMetadataFile    = ".metadata.bin"
    bucketMetadataFormat  = 1
    bucketMetadataVersion = 1
)

//go:generate msgp -file $GOFILE -unexported

// bucketMetadata contains bucket metadata.
// When adding/removing fields, regenerate the marshal code using the go generate above.
// Only changing meaning of fields requires a version bump.
// bucketMetadataFormat refers to the format.
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
type bucketMetadata struct {
    Name        string
    Created     time.Time
    LockEnabled bool
}

// newBucketMetadata creates bucketMetadata with the supplied name and Created to Now.
func newBucketMetadata(name string) bucketMetadata {
    return bucketMetadata{
        Name:    name,
        Created: UTCNow(),
    }
}

// loadBucketMeta loads the metadata of bucket by name from ObjectLayer o.
// If an error is returned the returned metadata will be default initialized.
func loadBucketMeta(ctx context.Context, o ObjectLayer, name string) (bucketMetadata, error) {
    b := newBucketMetadata(name)
    configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
    data, err := readConfig(ctx, o, configFile)
    if err != nil {
        return b, err
    }
    if len(data) <= 4 {
        return b, fmt.Errorf("loadBucketMetadata: no data")
    }
    // Read header
    switch binary.LittleEndian.Uint16(data[0:2]) {
    case bucketMetadataFormat:
    default:
        return b, fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
    }
    switch binary.LittleEndian.Uint16(data[2:4]) {
    case bucketMetadataVersion:
    default:
        return b, fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
    }

    // OK, parse data.
    _, err = b.UnmarshalMsg(data[4:])
    return b, err
}

var errMetaDataConverted = errors.New("metadata converted")

// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (bucketMetadata, error) {
    meta, err := loadBucketMeta(ctx, objectAPI, bucket)
    if err == nil {
        return meta, nil
    }

    if err != errConfigNotFound {
        return meta, err
    }

    // Control here means old bucket without bucket metadata. Hence we migrate existing settings.
    if err = meta.convertLegacyLockconfig(ctx, objectAPI); err != nil {
        return meta, err
    }

    return meta, errMetaDataConverted
}

func (b *bucketMetadata) convertLegacyLockconfig(ctx context.Context, objectAPI ObjectLayer) error {
    configFile := path.Join(bucketConfigPrefix, b.Name, legacyBucketObjectLockEnabledConfigFile)

    save := func() error {
        if err := b.save(ctx, objectAPI); err != nil {
            return err
        }

        if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
            logger.LogIf(ctx, err)
        }
        return nil
    }

    configData, err := readConfig(ctx, objectAPI, configFile)
    if err != nil {
        if !errors.Is(err, errConfigNotFound) {
            return err
        }

        return save()
    }

    if string(configData) != legacyBucketObjectLockEnabledConfig {
        return fmt.Errorf("content mismatch in config file %v", configFile)
    }

    b.LockEnabled = true
    return save()
}

// save config to supplied ObjectLayer o.
func (b *bucketMetadata) save(ctx context.Context, o ObjectLayer) error {
    data := make([]byte, 4, b.Msgsize()+4)
    // Put header
    binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
    binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)

    // Add data
    data, err := b.MarshalMsg(data)
    if err != nil {
        return err
    }
    configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
    return saveConfig(ctx, o, configFile, data)
}

// removeBucketMeta removes bucket metadata.
// If config does not exist no error is returned.
func removeBucketMeta(ctx context.Context, obj ObjectLayer, bucket string) error {
    configFile := path.Join(bucketConfigPrefix, bucket, bucketMetadataFile)
    if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
        return err
    }
    return nil
}
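The .metadata.bin layout introduced above is just two little-endian uint16s (format, then version) followed by the msgp payload. A standalone sketch of writing and validating that 4-byte header; the payload here is a placeholder string, not the real msgp bytes:

package main

import (
    "encoding/binary"
    "fmt"
)

const (
    metaFormat  = 1 // mirrors bucketMetadataFormat
    metaVersion = 1 // mirrors bucketMetadataVersion
)

// encode prepends the 4-byte header to an already-serialized payload,
// the same way bucketMetadata.save does before calling MarshalMsg.
func encode(payload []byte) []byte {
    data := make([]byte, 4, 4+len(payload))
    binary.LittleEndian.PutUint16(data[0:2], metaFormat)
    binary.LittleEndian.PutUint16(data[2:4], metaVersion)
    return append(data, payload...)
}

// decode validates the header and returns the payload, matching the
// order of checks in loadBucketMeta: length, then format, then version.
func decode(data []byte) ([]byte, error) {
    if len(data) <= 4 {
        return nil, fmt.Errorf("no data")
    }
    if f := binary.LittleEndian.Uint16(data[0:2]); f != metaFormat {
        return nil, fmt.Errorf("unknown format: %d", f)
    }
    if v := binary.LittleEndian.Uint16(data[2:4]); v != metaVersion {
        return nil, fmt.Errorf("unknown version: %d", v)
    }
    return data[4:], nil
}

func main() {
    blob := encode([]byte("msgp-bytes-here"))
    payload, err := decode(blob)
    fmt.Println(string(payload), err)
}

Keeping format and version outside the msgp body means an old binary can reject a file it cannot parse without having to attempt a decode first.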
160 cmd/bucket-meta_gen.go Normal file
@@ -0,0 +1,160 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
    "github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *bucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, err = dc.ReadMapHeader()
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, err = dc.ReadMapKeyPtr()
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "Name":
            z.Name, err = dc.ReadString()
            if err != nil {
                err = msgp.WrapError(err, "Name")
                return
            }
        case "Created":
            z.Created, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "Created")
                return
            }
        case "LockEnabled":
            z.LockEnabled, err = dc.ReadBool()
            if err != nil {
                err = msgp.WrapError(err, "LockEnabled")
                return
            }
        default:
            err = dc.Skip()
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    return
}

// EncodeMsg implements msgp.Encodable
func (z bucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
    // map header, size 3
    // write "Name"
    err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    if err != nil {
        return
    }
    err = en.WriteString(z.Name)
    if err != nil {
        err = msgp.WrapError(err, "Name")
        return
    }
    // write "Created"
    err = en.Append(0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
    if err != nil {
        return
    }
    err = en.WriteTime(z.Created)
    if err != nil {
        err = msgp.WrapError(err, "Created")
        return
    }
    // write "LockEnabled"
    err = en.Append(0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
    if err != nil {
        return
    }
    err = en.WriteBool(z.LockEnabled)
    if err != nil {
        err = msgp.WrapError(err, "LockEnabled")
        return
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z bucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    // map header, size 3
    // string "Name"
    o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    o = msgp.AppendString(o, z.Name)
    // string "Created"
    o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
    o = msgp.AppendTime(o, z.Created)
    // string "LockEnabled"
    o = append(o, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
    o = msgp.AppendBool(o, z.LockEnabled)
    return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *bucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, bts, err = msgp.ReadMapKeyZC(bts)
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "Name":
            z.Name, bts, err = msgp.ReadStringBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "Name")
                return
            }
        case "Created":
            z.Created, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "Created")
                return
            }
        case "LockEnabled":
            z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "LockEnabled")
                return
            }
        default:
            bts, err = msgp.Skip(bts)
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    o = bts
    return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z bucketMetadata) Msgsize() (s int) {
    s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize
    return
}
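The generated code above hard-codes the msgpack bytes for each field name: 0x83 is a fixmap header announcing three key/value pairs, and 0xa4 0x4e 0x61 0x6d 0x65 is simply the fixstr "Name". A small sketch verifying those constants with the same msgp helper library the repo already vendors (github.com/tinylib/msgp):

package main

import (
    "bytes"
    "fmt"

    "github.com/tinylib/msgp/msgp"
)

func main() {
    // The generated encoder appends pre-computed msgpack bytes for each
    // field name; AppendString produces the identical fixstr encoding.
    want := []byte{0xa4, 0x4e, 0x61, 0x6d, 0x65} // 0xa4 = fixstr len 4
    got := msgp.AppendString(nil, "Name")
    fmt.Println(bytes.Equal(want, got)) // true

    // And 0x83 is a fixmap header for 3 entries, which is why every
    // generated method begins with "map header, size 3".
    hdr := msgp.AppendMapHeader(nil, 3)
    fmt.Printf("% x\n", hdr) // 83
}

Pre-computing the key bytes at generation time is the whole point of msgp: encoding a struct becomes a handful of appends with no reflection.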
123 cmd/bucket-meta_gen_test.go Normal file
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
    "bytes"
    "testing"

    "github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalbucketMetadata(t *testing.T) {
    v := bucketMetadata{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgbucketMetadata(b *testing.B) {
    v := bucketMetadata{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgbucketMetadata(b *testing.B) {
    v := bucketMetadata{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshalbucketMetadata(b *testing.B) {
    v := bucketMetadata{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodebucketMetadata(t *testing.T) {
    v := bucketMetadata{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodebucketMetadata Msgsize() is inaccurate")
    }

    vn := bucketMetadata{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodebucketMetadata(b *testing.B) {
    v := bucketMetadata{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodebucketMetadata(b *testing.B) {
    v := bucketMetadata{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}
@@ -22,7 +22,6 @@ import (
    "errors"
    "io"
    "net/http"
-   "net/url"
    "path"
    "reflect"
    "time"
@@ -30,8 +29,8 @@ import (
    "github.com/gorilla/mux"
    xhttp "github.com/minio/minio/cmd/http"
    "github.com/minio/minio/cmd/logger"
+   "github.com/minio/minio/pkg/bucket/policy"
    "github.com/minio/minio/pkg/event"
-   "github.com/minio/minio/pkg/policy"
)

const (
@@ -84,8 +83,8 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
-   config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
    config.SetRegion(globalServerRegion)
+   config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
    notificationBytes, err := xml.Marshal(config)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -116,7 +115,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
            // With newer config disallowing changing / turning off
            // notification targets without removing ARN in notification
            // configuration we won't see this problem anymore.
-           if reflect.DeepEqual(queue.ARN, arnErr.ARN) {
+           if reflect.DeepEqual(queue.ARN, arnErr.ARN) && i < len(config.QueueList) {
                config.QueueList = append(config.QueueList[:i],
                    config.QueueList[i+1:]...)
            }
@@ -282,16 +281,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

    w.Header().Set(xhttp.ContentType, "text/event-stream")

-   doneCh := make(chan struct{})
-   defer close(doneCh)
-
    // Listen Publisher and peer-listen-client uses nonblocking send and hence does not wait for slow receivers.
    // Use buffered channel to take care of burst sends or slow w.Write()
    listenCh := make(chan interface{}, 4000)

-   peers := getRestClients(globalEndpoints)
+   peers := newPeerRestClients(globalEndpoints)

-   globalHTTPListen.Subscribe(listenCh, doneCh, func(evI interface{}) bool {
+   globalHTTPListen.Subscribe(listenCh, ctx.Done(), func(evI interface{}) bool {
        ev, ok := evI.(event.Event)
        if !ok {
            return false
@@ -299,18 +295,14 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
        if ev.S3.Bucket.Name != values.Get(peerRESTListenBucket) {
            return false
        }
-       objectName, uerr := url.QueryUnescape(ev.S3.Object.Key)
-       if uerr != nil {
-           objectName = ev.S3.Object.Key
-       }
-       return len(rulesMap.Match(ev.EventName, objectName).ToSlice()) != 0
+       return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
    })

    for _, peer := range peers {
        if peer == nil {
            continue
        }
-       peer.Listen(listenCh, doneCh, values)
+       peer.Listen(listenCh, ctx.Done(), values)
    }

    keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -336,7 +328,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
                return
            }
            w.(http.Flusher).Flush()
-       case <-GlobalServiceDoneCh:
+       case <-ctx.Done():
            return
        }
    }
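The listen handler above depends on a large buffered channel precisely because the publisher uses a non-blocking send: a slow SSE writer drops events rather than stalling peers. A hedged, scaled-down sketch of that pattern, not MinIO's actual pubsub implementation:

package main

import "fmt"

// publish performs the non-blocking send the diff's comment describes:
// if the subscriber's buffer is full, the event is dropped, so a slow
// consumer can never apply back-pressure to the publisher.
func publish(ch chan<- string, ev string) bool {
    select {
    case ch <- ev:
        return true
    default:
        return false // buffer full, event dropped
    }
}

func main() {
    // Mirrors listenCh := make(chan interface{}, 4000), scaled down to 4.
    listenCh := make(chan string, 4)
    for i := 0; i < 6; i++ {
        if !publish(listenCh, fmt.Sprintf("event-%d", i)) {
            fmt.Println("dropped event", i)
        }
    }
    close(listenCh)
    for ev := range listenCh {
        fmt.Println("delivered", ev)
    }
}

The 4000-slot buffer in the handler is sized to absorb bursts; the select/default makes the drop policy explicit instead of relying on goroutine timeouts.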
535 cmd/bucket-object-lock.go Normal file
@@ -0,0 +1,535 @@
/*
 * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "bytes"
    "context"
    "encoding/xml"
    "errors"
    "math"
    "net/http"
    "path"
    "sync"

    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/auth"
    objectlock "github.com/minio/minio/pkg/bucket/object/lock"
    "github.com/minio/minio/pkg/bucket/policy"
)

// BucketObjectLockSys - map of bucket and retention configuration.
type BucketObjectLockSys struct {
    sync.RWMutex
    retentionMap map[string]*objectlock.Retention
}

// Set - set retention configuration.
func (sys *BucketObjectLockSys) Set(bucketName string, retention *objectlock.Retention) {
    if globalIsGateway {
        // no-op
        return
    }

    sys.Lock()
    sys.retentionMap[bucketName] = retention
    sys.Unlock()
}

// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r *objectlock.Retention, ok bool) {
    if globalIsGateway {
        // When gateway is enabled, no cached value
        // is used to validate bucket object lock configuration
        objAPI := newObjectLayerWithoutSafeModeFn()
        if objAPI == nil {
            return
        }

        lc, err := objAPI.GetBucketObjectLockConfig(GlobalContext, bucketName)
        if err != nil {
            return
        }
        return lc.ToRetention(), true
    }

    sys.RLock()
    defer sys.RUnlock()
    r, ok = sys.retentionMap[bucketName]
    return r, ok
}

// Remove - removes retention configuration.
func (sys *BucketObjectLockSys) Remove(bucketName string) {
    sys.Lock()
    delete(sys.retentionMap, bucketName)
    sys.Unlock()
}

// Similar to enforceRetentionBypassForDelete but for WebUI
func enforceRetentionBypassForDeleteWeb(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, govBypassPerms bool) APIErrorCode {
    opts, err := getOpts(ctx, r, bucket, object)
    if err != nil {
        return toAPIErrorCode(ctx, err)
    }

    oi, err := getObjectInfoFn(ctx, bucket, object, opts)
    if err != nil {
        return toAPIErrorCode(ctx, err)
    }

    lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
    if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
        return ErrObjectLocked
    }

    ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
    if ret.Mode.Valid() {
        switch ret.Mode {
        case objectlock.RetCompliance:
            // In compliance mode, a protected object version can't be overwritten
            // or deleted by any user, including the root user in your AWS account.
            // When an object is locked in compliance mode, its retention mode can't
            // be changed, and its retention period can't be shortened. Compliance mode
            // ensures that an object version can't be overwritten or deleted for the
            // duration of the retention period.
            t, err := objectlock.UTCNowNTP()
            if err != nil {
                logger.LogIf(ctx, err)
                return ErrObjectLocked
            }

            if !ret.RetainUntilDate.Before(t) {
                return ErrObjectLocked
            }
            return ErrNone
        case objectlock.RetGovernance:
            // In governance mode, users can't overwrite or delete an object
            // version or alter its lock settings unless they have special
            // permissions. With governance mode, you protect objects against
            // being deleted by most users, but you can still grant some users
            // permission to alter the retention settings or delete the object
            // if necessary. You can also use governance mode to test retention-period
            // settings before creating a compliance-mode retention period.
            // To override or remove governance-mode retention settings, a
            // user must have the s3:BypassGovernanceRetention permission
            // and must explicitly include x-amz-bypass-governance-retention:true
            // as a request header with any request that requires overriding
            // governance mode.
            byPassSet := govBypassPerms && objectlock.IsObjectLockGovernanceBypassSet(r.Header)
            if !byPassSet {
                t, err := objectlock.UTCNowNTP()
                if err != nil {
                    logger.LogIf(ctx, err)
                    return ErrObjectLocked
                }

                if !ret.RetainUntilDate.Before(t) {
                    return ErrObjectLocked
                }

                if !govBypassPerms {
                    return ErrObjectLocked
                }

                return ErrNone
            }
        }
    }
    return ErrNone
}

// enforceRetentionForDeletion checks if it is appropriate to remove an
// object according to locking configuration when this is lifecycle/bucket quota asking.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
    lhold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
    if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
        return true
    }

    ret := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
    if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) {
        t, err := objectlock.UTCNowNTP()
        if err != nil {
            logger.LogIf(ctx, err)
            return true
        }
        if ret.RetainUntilDate.After(t) {
            return true
        }
    }
    return false
}

// enforceRetentionBypassForDelete enforces whether an existing object under governance can be deleted
// with governance bypass headers set in the request.
// Objects under site wide WORM can never be overwritten.
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in "Compliance" mode can be overwritten only if retention date is past.
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
    opts, err := getOpts(ctx, r, bucket, object)
    if err != nil {
        return toAPIErrorCode(ctx, err)
    }

    oi, err := getObjectInfoFn(ctx, bucket, object, opts)
    if err != nil {
        return toAPIErrorCode(ctx, err)
    }

    lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
    if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
        return ErrObjectLocked
    }

    ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
    if ret.Mode.Valid() {
        switch ret.Mode {
        case objectlock.RetCompliance:
            // In compliance mode, a protected object version can't be overwritten
            // or deleted by any user, including the root user in your AWS account.
            // When an object is locked in compliance mode, its retention mode can't
            // be changed, and its retention period can't be shortened. Compliance mode
            // ensures that an object version can't be overwritten or deleted for the
            // duration of the retention period.
            t, err := objectlock.UTCNowNTP()
            if err != nil {
                logger.LogIf(ctx, err)
                return ErrObjectLocked
            }

            if !ret.RetainUntilDate.Before(t) {
                return ErrObjectLocked
            }
            return ErrNone
        case objectlock.RetGovernance:
            // In governance mode, users can't overwrite or delete an object
            // version or alter its lock settings unless they have special
            // permissions. With governance mode, you protect objects against
            // being deleted by most users, but you can still grant some users
            // permission to alter the retention settings or delete the object
            // if necessary. You can also use governance mode to test retention-period
            // settings before creating a compliance-mode retention period.
            // To override or remove governance-mode retention settings, a
            // user must have the s3:BypassGovernanceRetention permission
            // and must explicitly include x-amz-bypass-governance-retention:true
            // as a request header with any request that requires overriding
            // governance mode.
            //
            byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
            if !byPassSet {
                t, err := objectlock.UTCNowNTP()
                if err != nil {
                    logger.LogIf(ctx, err)
                    return ErrObjectLocked
                }

                if !ret.RetainUntilDate.Before(t) {
                    return ErrObjectLocked
                }
                return ErrNone
            }
            // https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
            // If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention
            // or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed.
            govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object)
            govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object)
            if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone {
                return ErrAccessDenied
            }
        }
    }
    return ErrNone
}

// enforceRetentionBypassForPut enforces whether an existing object under governance can be overwritten
// with governance bypass headers set in the request.
// Objects under site wide WORM cannot be overwritten.
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool, claims map[string]interface{}) (ObjectInfo, APIErrorCode) {
    byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
    opts, err := getOpts(ctx, r, bucket, object)
    if err != nil {
        return ObjectInfo{}, toAPIErrorCode(ctx, err)
    }

    oi, err := getObjectInfoFn(ctx, bucket, object, opts)
    if err != nil {
        return oi, toAPIErrorCode(ctx, err)
    }

    t, err := objectlock.UTCNowNTP()
    if err != nil {
        logger.LogIf(ctx, err)
        return oi, ErrObjectLocked
    }

    // Pass in relative days from current time, to additionally verify "object-lock-remaining-retention-days" policy if any.
    days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))

    ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
    if ret.Mode.Valid() {
        // Retention has expired you may change whatever you like.
        if ret.RetainUntilDate.Before(t) {
            perm := isPutRetentionAllowed(bucket, object,
                days, objRetention.RetainUntilDate.Time,
                objRetention.Mode, byPassSet, r, cred,
                owner, claims)
            return oi, perm
        }

        switch ret.Mode {
        case objectlock.RetGovernance:
            govPerm := isPutRetentionAllowed(bucket, object, days,
                objRetention.RetainUntilDate.Time, objRetention.Mode,
                byPassSet, r, cred, owner, claims)
            // Governance mode retention period cannot be shortened, if x-amz-bypass-governance is not set.
            if !byPassSet {
                if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
                    return oi, ErrObjectLocked
                }
            }
            return oi, govPerm
        case objectlock.RetCompliance:
            // Compliance retention mode cannot be changed or shortened.
            // https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
            if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
                return oi, ErrObjectLocked
            }
            compliancePerm := isPutRetentionAllowed(bucket, object,
                days, objRetention.RetainUntilDate.Time, objRetention.Mode,
                false, r, cred, owner, claims)
            return oi, compliancePerm
        }
        return oi, ErrNone
    } // No pre-existing retention metadata present.

    perm := isPutRetentionAllowed(bucket, object,
        days, objRetention.RetainUntilDate.Time,
        objRetention.Mode, byPassSet, r, cred, owner, claims)
    return oi, perm
}

// checkPutObjectLockAllowed enforces object retention policy and legal hold policy
// for requests with WORM headers
// See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-managing.html for the spec.
// For non-existing objects with object retention headers set, this method returns ErrNone if bucket has
// locking enabled and user has requisite permissions (s3:PutObjectRetention)
// If object exists on object store and site wide WORM enabled - this method
// returns an error. For objects in "Governance" mode, overwrite is allowed if the retention date has expired.
// For objects in "Compliance" mode, retention date cannot be shortened, and mode cannot be altered.
// For objects with legal hold header set, the s3:PutObjectLegalHold permission is expected to be set
// Both legal hold and retention can be applied independently on an object
func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
    var mode objectlock.RetMode
    var retainDate objectlock.RetentionDate
    var legalHold objectlock.ObjectLegalHold

    retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
    legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)

    retentionCfg, isWORMBucket := globalBucketObjectLockSys.Get(bucket)
    if !isWORMBucket {
        if legalHoldRequested || retentionRequested {
            return mode, retainDate, legalHold, ErrInvalidBucketObjectLockConfiguration
        }
        // If this is not a WORM enabled bucket, we should return right here.
        return mode, retainDate, legalHold, ErrNone
    }

    var objExists bool
    opts, err := getOpts(ctx, r, bucket, object)
    if err != nil {
        return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
    }

    t, err := objectlock.UTCNowNTP()
    if err != nil {
        logger.LogIf(ctx, err)
        return mode, retainDate, legalHold, ErrObjectLocked
    }

    if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
        objExists = true
        r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
        if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
            return mode, retainDate, legalHold, ErrObjectLocked
        }
        mode = r.Mode
        retainDate = r.RetainUntilDate
        legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
        // Disallow overwriting an object on legal hold
        if legalHold.Status == objectlock.LegalHoldOn {
            return mode, retainDate, legalHold, ErrObjectLocked
        }
    }

    if legalHoldRequested {
        var lerr error
        if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(r.Header); lerr != nil {
            return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
        }
    }

    if retentionRequested {
        legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(r.Header)
        if err != nil {
            return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
        }
        rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(r.Header)
        if err != nil {
            return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
        }
        if objExists && retainDate.After(t) {
            return mode, retainDate, legalHold, ErrObjectLocked
        }
        if retentionPermErr != ErrNone {
            return mode, retainDate, legalHold, retentionPermErr
        }
        return rMode, rDate, legalHold, ErrNone
    }

    if !retentionRequested && isWORMBucket {
        if retentionPermErr != ErrNone {
            return mode, retainDate, legalHold, retentionPermErr
        }
        t, err := objectlock.UTCNowNTP()
        if err != nil {
            logger.LogIf(ctx, err)
            return mode, retainDate, legalHold, ErrObjectLocked
        }
        // AWS S3 just creates a new version of object when an object is being overwritten.
        if objExists && retainDate.After(t) {
            return mode, retainDate, legalHold, ErrObjectLocked
        }
        if !legalHoldRequested && !retentionCfg.IsEmpty() {
            // inherit retention from bucket configuration
            return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone
        }
        return "", objectlock.RetentionDate{}, legalHold, ErrNone
    }
    return mode, retainDate, legalHold, ErrNone
}

func readBucketObjectLockConfig(ctx context.Context, objAPI ObjectLayer, bucket string) (*objectlock.Config, error) {
    meta, err := loadBucketMetadata(ctx, objAPI, bucket)
    if err != nil && err != errMetaDataConverted {
        return nil, toObjectErr(err, bucket)
    }
    if !meta.LockEnabled {
        return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
    }
    configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
    configData, err := readConfig(ctx, objAPI, configFile)
    if err != nil {
        if err != errConfigNotFound {
            return nil, toObjectErr(err, bucket)
        }
        return objectlock.NewObjectLockConfig(), nil
    }
    cfg, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
    if err != nil {
        return nil, toObjectErr(err, bucket)
    }
    return cfg, nil
}

func saveBucketObjectLockConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *objectlock.Config) error {
    meta, err := loadBucketMetadata(ctx, objAPI, bucket)
    if err != nil && err != errMetaDataConverted {
        return toObjectErr(err, bucket)
    }
    if !meta.LockEnabled {
        return BucketObjectLockConfigNotFound{Bucket: bucket}
    }

    data, err := xml.Marshal(config)
    if err != nil {
        return toObjectErr(err, bucket)
    }

    configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
    if err = saveConfig(ctx, objAPI, configFile, data); err != nil {
        return toObjectErr(err, bucket)
    }
    return nil
}

// NewBucketObjectLockSys returns initialized BucketObjectLockSys
func NewBucketObjectLockSys() *BucketObjectLockSys {
    return &BucketObjectLockSys{
        retentionMap: make(map[string]*objectlock.Retention),
    }
}

// Init - initializes bucket object lock config system for all buckets
func (sys *BucketObjectLockSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
    if objAPI == nil {
        return errServerNotInitialized
    }

    // In gateway mode, we always fetch the bucket object lock configuration from the gateway backend.
    // So, this is a no-op for gateway servers.
    if globalIsGateway {
        return nil
    }

    // Load BucketObjectLockSys once during boot.
    return sys.load(buckets, objAPI)
}

func (sys *BucketObjectLockSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
    for _, bucket := range buckets {
        ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
        meta, err := loadBucketMetadata(ctx, objAPI, bucket.Name)
        if err != nil {
            if err != errMetaDataConverted {
                return err
            }

            meta.Created = bucket.Created
            logger.LogIf(ctx, meta.save(ctx, objAPI))
        }
        if !meta.LockEnabled {
            continue
        }

        configFile := path.Join(bucketConfigPrefix, bucket.Name, objectLockConfig)
        configData, err := readConfig(ctx, objAPI, configFile)
        if err != nil {
            if errors.Is(err, errConfigNotFound) {
                globalBucketObjectLockSys.Set(bucket.Name, &objectlock.Retention{})
                continue
            }
            return err
        }

        config, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
        if err != nil {
            return err
        }
        retention := &objectlock.Retention{}
        if config.Rule != nil {
            retention = config.ToRetention()
        }
        globalBucketObjectLockSys.Set(bucket.Name, retention)
    }
    return nil
}
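Both delete paths in the file above reduce to the same decision table: a compliance lock holds until the retain-until date passes, while a governance lock additionally yields to the bypass header plus the bypass permission. A condensed, hedged sketch of that rule using plain types (not MinIO's objectlock package):

package main

import (
    "fmt"
    "time"
)

type Mode string

const (
    Governance Mode = "GOVERNANCE"
    Compliance Mode = "COMPLIANCE"
)

// deleteAllowed condenses enforceRetentionBypassForDelete: either mode
// unlocks once retainUntil has passed; governance can also be bypassed
// early when both the x-amz-bypass-governance-retention header and the
// s3:BypassGovernanceRetention permission are present.
func deleteAllowed(mode Mode, retainUntil time.Time, bypassHeader, bypassPerm bool, now time.Time) bool {
    if now.After(retainUntil) {
        return true // retention expired for either mode
    }
    if mode == Governance && bypassHeader && bypassPerm {
        return true
    }
    return false
}

func main() {
    now := time.Now()
    until := now.Add(24 * time.Hour)
    fmt.Println(deleteAllowed(Compliance, until, true, true, now))  // false: compliance never bypasses
    fmt.Println(deleteAllowed(Governance, until, true, true, now))  // true: header + permission
    fmt.Println(deleteAllowed(Governance, until, true, false, now)) // false: header without permission
}

The real code layers legal hold on top of this (an active legal hold blocks deletion regardless of mode) and sources time from NTP rather than the local clock.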
@@ -24,7 +24,7 @@ import (
    humanize "github.com/dustin/go-humanize"
    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/logger"
-   "github.com/minio/minio/pkg/policy"
+   "github.com/minio/minio/pkg/bucket/policy"
)

const (
@@ -77,7 +77,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht

    bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
    if err != nil {
-       writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
+       writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

@@ -92,7 +92,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
        return
    }

-   globalPolicySys.Set(bucket, *bucketPolicy)
+   globalPolicySys.Set(bucket, bucketPolicy)
    globalNotificationSys.SetBucketPolicy(ctx, bucket, bucketPolicy)

    // Success.
@@ -18,7 +18,6 @@ package cmd

import (
    "bytes"
-   "context"
    "fmt"
    "io"
    "io/ioutil"
@@ -29,8 +28,8 @@ import (
    "testing"

    "github.com/minio/minio/pkg/auth"
-   "github.com/minio/minio/pkg/policy"
-   "github.com/minio/minio/pkg/policy/condition"
+   "github.com/minio/minio/pkg/bucket/policy"
+   "github.com/minio/minio/pkg/bucket/policy/condition"
)

func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy {
@@ -103,7 +102,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
    credentials auth.Credentials, t *testing.T) {

    bucketName1 := fmt.Sprintf("%s-1", bucketName)
-   if err := obj.MakeBucketWithLocation(context.Background(), bucketName1, ""); err != nil {
+   if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, "", false); err != nil {
        t.Fatal(err)
    }
@@ -20,27 +20,30 @@ import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "strings"
    "sync"
    "time"

    jsoniter "github.com/json-iterator/go"
    miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
    xhttp "github.com/minio/minio/cmd/http"
    "github.com/minio/minio/cmd/logger"
-   "github.com/minio/minio/pkg/event"
+   "github.com/minio/minio/pkg/bucket/policy"
    "github.com/minio/minio/pkg/handlers"
-   "github.com/minio/minio/pkg/policy"
)

// PolicySys - policy subsystem.
type PolicySys struct {
    sync.RWMutex
-   bucketPolicyMap map[string]policy.Policy
+   bucketPolicyMap map[string]*policy.Policy
}

// Set - sets policy to given bucket name. If policy is empty, existing policy is removed.
-func (sys *PolicySys) Set(bucketName string, policy policy.Policy) {
+func (sys *PolicySys) Set(bucketName string, policy *policy.Policy) {
    if globalIsGateway {
        // Set policy is a non-op under gateway mode.
        return
@@ -71,7 +74,7 @@ func (sys *PolicySys) IsAllowed(args policy.Args) bool {
    // is used to validate bucket policies.
    objAPI := newObjectLayerFn()
    if objAPI != nil {
-       config, err := objAPI.GetBucketPolicy(context.Background(), args.BucketName)
+       config, err := objAPI.GetBucketPolicy(GlobalContext, args.BucketName)
        if err == nil {
            return config.IsAllowed(args)
        }
@@ -94,12 +97,12 @@ func (sys *PolicySys) IsAllowed(args policy.Args) bool {
// Loads policies for all buckets into PolicySys.
func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
    for _, bucket := range buckets {
-       config, err := objAPI.GetBucketPolicy(context.Background(), bucket.Name)
+       config, err := getPolicyConfig(objAPI, bucket.Name)
        if err != nil {
            if _, ok := err.(BucketPolicyNotFound); ok {
                sys.Remove(bucket.Name)
                continue
            }
-           continue
+           return err
        }
        // This part is specifically written to handle migration
        // when the Version string is empty, this was allowed
@@ -110,12 +113,12 @@ func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
            logger.Info("Found in-consistent bucket policies, Migrating them for Bucket: (%s)", bucket.Name)
            config.Version = policy.DefaultVersion

-           if err = savePolicyConfig(context.Background(), objAPI, bucket.Name, config); err != nil {
-               logger.LogIf(context.Background(), err)
+           if err = savePolicyConfig(GlobalContext, objAPI, bucket.Name, config); err != nil {
+               logger.LogIf(GlobalContext, err)
                return err
            }
        }
-       sys.Set(bucket.Name, *config)
+       sys.Set(bucket.Name, config)
    }
    return nil
}
@@ -123,7 +126,7 @@ func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
// Init - initializes policy system from policy.json of all buckets.
func (sys *PolicySys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
    if objAPI == nil {
-       return errInvalidArgument
+       return errServerNotInitialized
    }

    // In gateway mode, we don't need to load the policies
@@ -132,62 +135,55 @@ func (sys *PolicySys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
        return nil
    }

-   doneCh := make(chan struct{})
-   defer close(doneCh)
-
-   // Initializing policy needs a retry mechanism for
-   // the following reasons:
-   //  - Read quorum is lost just after the initialization
-   //    of the object layer.
-   retryTimerCh := newRetryTimerSimple(doneCh)
-   for {
-       select {
-       case <-retryTimerCh:
-           // Load PolicySys once during boot.
-           if err := sys.load(buckets, objAPI); err != nil {
-               if err == errDiskNotFound ||
-                   strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-                   strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-                   logger.Info("Waiting for policy subsystem to be initialized..")
-                   continue
-               }
-               return err
-           }
-           return nil
-       case <-globalOSSignalCh:
-           return fmt.Errorf("Initializing Policy sub-system gracefully stopped")
-       }
-   }
+   // Load PolicySys once during boot.
+   return sys.load(buckets, objAPI)
}

// NewPolicySys - creates new policy system.
func NewPolicySys() *PolicySys {
    return &PolicySys{
-       bucketPolicyMap: make(map[string]policy.Policy),
+       bucketPolicyMap: make(map[string]*policy.Policy),
    }
}

-func getConditionValues(request *http.Request, locationConstraint string, username string, claims map[string]interface{}) map[string][]string {
+func getConditionValues(r *http.Request, lc string, username string, claims map[string]interface{}) map[string][]string {
    currTime := UTCNow()
-   principalType := func() string {
-       if username != "" {
-           return "User"
-       }
-       return "Anonymous"
-   }()

+   principalType := "Anonymous"
+   if username != "" {
+       principalType = "User"
+   }

    args := map[string][]string{
-       "CurrenTime":      {currTime.Format(event.AMZTimeFormat)},
-       "EpochTime":       {fmt.Sprintf("%d", currTime.Unix())},
+       "CurrentTime":     {currTime.Format(time.RFC3339)},
+       "EpochTime":       {strconv.FormatInt(currTime.Unix(), 10)},
+       "SecureTransport": {strconv.FormatBool(r.TLS != nil)},
+       "SourceIp":        {handlers.GetSourceIP(r)},
+       "UserAgent":       {r.UserAgent()},
+       "Referer":         {r.Referer()},
        "principaltype":   {principalType},
-       "SecureTransport": {fmt.Sprintf("%t", request.TLS != nil)},
-       "SourceIp":        {handlers.GetSourceIP(request)},
-       "UserAgent":       {request.UserAgent()},
-       "Referer":         {request.Referer()},
        "userid":          {username},
        "username":        {username},
    }

-   for key, values := range request.Header {
+   if lc != "" {
+       args["LocationConstraint"] = []string{lc}
+   }
+
+   cloneHeader := r.Header.Clone()
+
+   for _, objLock := range []string{
+       xhttp.AmzObjectLockMode,
+       xhttp.AmzObjectLockLegalHold,
+       xhttp.AmzObjectLockRetainUntilDate,
+   } {
+       if values, ok := cloneHeader[objLock]; ok {
+           args[strings.TrimPrefix(objLock, "X-Amz-")] = values
+       }
+       cloneHeader.Del(objLock)
+   }
+
+   for key, values := range cloneHeader {
        if existingValues, found := args[key]; found {
            args[key] = append(existingValues, values...)
        } else {
@@ -195,7 +191,23 @@ func getConditionValues(request *http.Request, locationConstraint string, userna
        }
    }

-   for key, values := range request.URL.Query() {
+   var cloneURLValues = url.Values{}
+   for k, v := range r.URL.Query() {
+       cloneURLValues[k] = v
+   }
+
+   for _, objLock := range []string{
+       xhttp.AmzObjectLockMode,
+       xhttp.AmzObjectLockLegalHold,
+       xhttp.AmzObjectLockRetainUntilDate,
+   } {
+       if values, ok := cloneURLValues[objLock]; ok {
+           args[strings.TrimPrefix(objLock, "X-Amz-")] = values
+       }
+       cloneURLValues.Del(objLock)
+   }
+
+   for key, values := range cloneURLValues {
        if existingValues, found := args[key]; found {
            args[key] = append(existingValues, values...)
        } else {
@@ -203,10 +215,6 @@ func getConditionValues(request *http.Request, locationConstraint string, userna
        }
    }

-   if locationConstraint != "" {
-       args["LocationConstraint"] = []string{locationConstraint}
-   }
-
    // JWT specific values
    for k, v := range claims {
        vStr, ok := v.(string)
@@ -214,6 +222,7 @@ func getConditionValues(request *http.Request, locationConstraint string, userna
            args[k] = []string{vStr}
        }
    }

    return args
}

@@ -222,7 +231,7 @@ func getPolicyConfig(objAPI ObjectLayer, bucketName string) (*policy.Policy, err
    // Construct path to policy.json for the given bucket.
    configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig)

-   configData, err := readConfig(context.Background(), objAPI, configFile)
+   configData, err := readConfig(GlobalContext, objAPI, configFile)
    if err != nil {
        if err == errConfigNotFound {
            err = BucketPolicyNotFound{Bucket: bucketName}
@@ -275,6 +284,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
    }

    var policyInfo miniogopolicy.BucketAccessPolicy
+   var json = jsoniter.ConfigCompatibleWithStandardLibrary
    if err = json.Unmarshal(data, &policyInfo); err != nil {
        // This should not happen because data is valid to JSON data.
        return nil, err
@@ -292,6 +302,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
    }

    var bucketPolicy policy.Policy
+   var json = jsoniter.ConfigCompatibleWithStandardLibrary
    if err = json.Unmarshal(data, &bucketPolicy); err != nil {
        // This should not happen because data is valid to JSON data.
        return nil, err
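The reworked getConditionValues above folds request state into the policy condition map, and the interesting step is the renaming: the three X-Amz object-lock headers are copied in under keys with the "X-Amz-" prefix trimmed, then deleted from the cloned header set so they are not added twice by the generic loop. A hedged standalone sketch of just that step (the header names stand in for the xhttp.AmzObjectLock* constants):

package main

import (
    "fmt"
    "net/http"
    "strings"
)

func main() {
    // Stand-ins for xhttp.AmzObjectLockMode and friends.
    objLockHeaders := []string{
        "X-Amz-Object-Lock-Mode",
        "X-Amz-Object-Lock-Legal-Hold",
        "X-Amz-Object-Lock-Retain-Until-Date",
    }

    h := http.Header{}
    h.Set("X-Amz-Object-Lock-Mode", "GOVERNANCE")
    h.Set("User-Agent", "example")

    args := map[string][]string{}
    clone := h.Clone()
    for _, k := range objLockHeaders {
        // Condition keys drop the X-Amz- prefix, mirroring
        // args[strings.TrimPrefix(objLock, "X-Amz-")] = values in the diff.
        if v, ok := clone[k]; ok {
            args[strings.TrimPrefix(k, "X-Amz-")] = v
        }
        clone.Del(k)
    }
    // Remaining headers pass through under their original names.
    for k, v := range clone {
        args[k] = append(args[k], v...)
    }
    fmt.Println(args) // map[Object-Lock-Mode:[GOVERNANCE] User-Agent:[example]]
}

Cloning the header and URL values before mutating them is what lets the function delete the object-lock entries without disturbing the live request.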
277
cmd/bucket-quota.go
Normal file
277
cmd/bucket-quota.go
Normal file
@@ -0,0 +1,277 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"context"
"encoding/json"
"errors"
"fmt"
"path"
"sync"
"time"

"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
)

// BucketQuotaSys - maps bucket names to their quota configuration.
type BucketQuotaSys struct {
sync.RWMutex
quotaMap map[string]madmin.BucketQuota
}

// Set - sets quota configuration.
func (sys *BucketQuotaSys) Set(bucketName string, q madmin.BucketQuota) {
sys.Lock()
sys.quotaMap[bucketName] = q
sys.Unlock()
}

// Get - gets quota configuration.
func (sys *BucketQuotaSys) Get(bucketName string) (q madmin.BucketQuota, ok bool) {
sys.RLock()
defer sys.RUnlock()
q, ok = sys.quotaMap[bucketName]
return
}

// Remove - removes quota configuration.
func (sys *BucketQuotaSys) Remove(bucketName string) {
sys.Lock()
delete(sys.quotaMap, bucketName)
sys.Unlock()
}

// Buckets - returns the list of buckets with a quota configuration.
func (sys *BucketQuotaSys) Buckets() []string {
sys.RLock()
defer sys.RUnlock()
var buckets []string
for k := range sys.quotaMap {
buckets = append(buckets, k)
}
return buckets
}
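Taken together, BucketQuotaSys is just an RWMutex-guarded map. Below is a minimal usage sketch of the accessors above; the bucket name and the 1 GiB limit are hypothetical, and madmin.HardQuota is assumed to be the non-FIFO quota type (it is not spelled out in this diff):

```go
// Sketch only; "photos" and the 1 GiB limit are illustrative values.
func exampleQuotaSysUsage() {
	sys := NewBucketQuotaSys() // constructor defined later in this file
	sys.Set("photos", madmin.BucketQuota{Quota: 1 << 30, Type: madmin.HardQuota})
	if q, ok := sys.Get("photos"); ok {
		fmt.Printf("bucket limited to %d bytes (%s)\n", q.Quota, q.Type)
	}
	sys.Remove("photos")
}
```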
// Init initializes the bucket quota configuration for all buckets.
func (sys *BucketQuotaSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}

// In gateway mode, we do not support bucket quota.
// So, this is a no-op for gateway servers.
if globalIsGateway {
return nil
}

return sys.load(buckets, objAPI)
}

func (sys *BucketQuotaSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
configFile := path.Join(bucketConfigPrefix, bucket.Name, bucketQuotaConfigFile)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
if errors.Is(err, errConfigNotFound) {
continue
}
return err
}
quotaCfg, err := parseBucketQuota(configData)
if err != nil {
return err
}
globalBucketQuotaSys.Set(bucket.Name, quotaCfg)
}
return nil
}

// NewBucketQuotaSys returns an initialized BucketQuotaSys
func NewBucketQuotaSys() *BucketQuotaSys {
return &BucketQuotaSys{quotaMap: map[string]madmin.BucketQuota{}}
}

// parseBucketQuota parses BucketQuota from JSON
func parseBucketQuota(data []byte) (quotaCfg madmin.BucketQuota, err error) {
err = json.Unmarshal(data, &quotaCfg)
if err != nil {
return
}
if !quotaCfg.Type.IsValid() {
return quotaCfg, fmt.Errorf("Invalid quota type %s", quotaCfg.Type)
}
return
}
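A round-trip sketch of parseBucketQuota; marshalling the madmin struct itself sidesteps guessing its exact JSON field names, which this diff does not show:

```go
func exampleParseBucketQuota() error {
	// Encode a quota config the same way the admin API would persist it.
	data, err := json.Marshal(madmin.BucketQuota{Quota: 1 << 30, Type: madmin.HardQuota})
	if err != nil {
		return err
	}
	cfg, err := parseBucketQuota(data)
	if err != nil {
		return err // e.g. "Invalid quota type" for an unknown Type value
	}
	fmt.Printf("parsed quota: %d bytes, type %s\n", cfg.Quota, cfg.Type)
	return nil
}
```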
type bucketStorageCache struct {
bucketsSizes map[string]uint64
lastUpdate time.Time
mu sync.Mutex
}

func (b *bucketStorageCache) check(ctx context.Context, q madmin.BucketQuota, bucket string, size int64) error {
b.mu.Lock()
defer b.mu.Unlock()
if time.Since(b.lastUpdate) > 10*time.Second {
dui, err := loadDataUsageFromBackend(ctx, newObjectLayerWithoutSafeModeFn())
if err != nil {
return err
}
b.lastUpdate = time.Now()
b.bucketsSizes = dui.BucketsSizes
}
currUsage := b.bucketsSizes[bucket]
if (currUsage + uint64(size)) > q.Quota {
return BucketQuotaExceeded{Bucket: bucket}
}
return nil
}

func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
if size < 0 {
return nil
}
q, ok := globalBucketQuotaSys.Get(bucket)
if !ok {
return nil
}

if q.Type == madmin.FIFOQuota {
return nil
}
return globalBucketStorageCache.check(ctx, q, bucket, size)
}
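So hard quotas are enforced synchronously on the write path (against a data-usage snapshot refreshed at most every 10 seconds), while FIFO quotas are deferred to the background sweeper below. A hedged sketch of the calling convention; the handler name and its arguments are hypothetical:

```go
// Hypothetical PUT-style helper gating a write on the quota check.
func putWithQuota(ctx context.Context, bucket string, size int64) error {
	if err := enforceBucketQuota(ctx, bucket, size); err != nil {
		return err // BucketQuotaExceeded surfaces as an S3 error upstream
	}
	// ... proceed with the actual object write ...
	return nil
}
```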
const (
bgQuotaInterval = 1 * time.Hour
)

// initQuotaEnforcement starts the routine that deletes objects in buckets
// that exceed the FIFO quota.
func initQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go startBucketQuotaEnforcement(ctx, objAPI)
}
}

func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(bgQuotaInterval).C:
logger.LogIf(ctx, enforceFIFOQuota(ctx, objAPI))
}

}
}

// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted to bring bucket usage within the quota.
func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
// Turn off quota enforcement if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
return nil
}
for _, bucket := range globalBucketQuotaSys.Buckets() {
// Check if the current bucket has quota restrictions; if not, skip it.
cfg, ok := globalBucketQuotaSys.Get(bucket)
if !ok {
continue
}
if cfg.Type != madmin.FIFOQuota {
continue
}
_, bucketHasLockConfig := globalBucketObjectLockSys.Get(bucket)

dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
return err
}
var toFree uint64
if dataUsageInfo.BucketsSizes[bucket] > cfg.Quota {
toFree = dataUsageInfo.BucketsSizes[bucket] - cfg.Quota
}
if toFree <= 0 {
continue
}
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)

// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil {
return err
}
// Reuse the fileScorer used by the disk cache to score entries by
// ModTime, so that the oldest objects in the bucket are deleted
// first. In the context of bucket quota enforcement the number of
// hits is irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
return err
}

for obj := range objInfoCh {
// Skip objects currently under retention.
if bucketHasLockConfig && enforceRetentionForDeletion(ctx, obj) {
continue
}
scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1)
}
var objects []string
numKeys := len(scorer.fileNames())
for i, key := range scorer.fileNames() {
objects = append(objects, key)
if len(objects) < maxObjectList && (i < numKeys-1) {
// Skip deletion until maxObjectList is reached or end of slice.
continue
}

if len(objects) == 0 {
break
}
// Delete the accumulated batch of objects.
deleteErrs, err := objectAPI.DeleteObjects(ctx, bucket, objects)
if err != nil {
logger.LogIf(ctx, err)
} else {
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
Name: objects[i],
},
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
}
objects = nil
}
}
}
return nil
}
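The deletion loop above flushes in chunks of maxObjectList so a single DeleteObjects call never grows unbounded. The same idiom in isolation; drainInBatches and deleteBatch are hypothetical names for illustration:

```go
// Flush accumulated keys when the batch is full or the input is exhausted.
func drainInBatches(keys []string, batchSize int, deleteBatch func([]string) error) error {
	var batch []string
	for i, k := range keys {
		batch = append(batch, k)
		if len(batch) < batchSize && i < len(keys)-1 {
			continue // keep accumulating
		}
		if err := deleteBatch(batch); err != nil {
			return err
		}
		batch = nil // start the next batch
	}
	return nil
}
```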
61 cmd/bucket-tagging.go Normal file
@@ -0,0 +1,61 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"context"
"encoding/xml"
"path"

"github.com/minio/minio-go/v6/pkg/tags"
)

func saveBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string, t *tags.Tags) error {
data, err := xml.Marshal(t)
if err != nil {
return toObjectErr(err, bucket)
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
if err := saveConfig(ctx, objAPI, configFile, data); err != nil {
return toObjectErr(err, bucket)
}
return nil
}

func deleteBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string) error {
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
if err := deleteConfig(ctx, objAPI, configFile); err != nil && err != errConfigNotFound {
return toObjectErr(err, bucket)
}
return nil
}

func readBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string) (*tags.Tags, error) {
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
return nil, BucketTaggingNotFound{Bucket: bucket}
}
return nil, toObjectErr(err, bucket)
}
t := &tags.Tags{}
if err = xml.Unmarshal(configData, t); err != nil {
return nil, toObjectErr(err, bucket)
}
return t, nil
}
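The tagging config is stored as the standard S3 &lt;Tagging&gt; XML document. A self-contained round-trip sketch with simplified stand-in types (the real minio-go tags.Tags produces the same wire format, but its internals are not shown in this diff):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Simplified stand-ins for the minio-go tags types.
type tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  struct {
		Tags []tag `xml:"Tag"`
	} `xml:"TagSet"`
}

func main() {
	var t tagging
	t.TagSet.Tags = []tag{{Key: "project", Value: "alpha"}}

	data, err := xml.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // <Tagging><TagSet><Tag><Key>project</Key>...

	var back tagging
	if err := xml.Unmarshal(data, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.TagSet.Tags[0].Key) // project
}
```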
@@ -18,8 +18,10 @@ package cmd

import (
"crypto/x509"
"encoding/gob"
"errors"
"net"
"os"
"path/filepath"
"strings"
"time"
@@ -34,6 +36,17 @@ import (
"github.com/minio/minio/pkg/env"
)

func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)

gob.Register(StorageErr(""))
}

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
@@ -136,6 +149,12 @@ func handleCommonCmdArgs(ctx *cli.Context) {
globalCLIContext.Addr = ctx.String("address")
}

// Check "no-compat" flag from command line argument.
globalCLIContext.StrictS3Compat = true
if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
globalCLIContext.StrictS3Compat = false
}

// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
@@ -151,18 +170,27 @@ func handleCommonCmdArgs(ctx *cli.Context) {
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}

logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())

// Check "compat" flag from command line argument.
globalCLIContext.StrictS3Compat = ctx.IsSet("compat") || ctx.GlobalIsSet("compat")
}

func handleCommonEnvVars() {
var err error
wormEnabled, err := config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
}
if wormEnabled {
logger.Fatal(errors.New("WORM is deprecated"), "global MINIO_WORM support is removed, please downgrade your server or migrate to https://github.com/minio/minio/tree/master/docs/retention")
}

globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
if err != nil {
logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
}

globalFSOSync, err = config.ParseBool(env.Get(config.EnvFSOSync, config.EnableOff))
if err != nil {
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
}

domains := env.Get(config.EnvDomain, "")
if len(domains) != 0 {
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
@@ -213,9 +241,15 @@ func handleCommonEnvVars() {
globalConfigEncrypted = true
}

globalWORMEnabled, err = config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate the old credentials inherited from the shell environment")
}
globalOldCred = oldCred
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
}
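The net effect of the new block: when both old-credential variables are set, they are validated, stashed in globalOldCred for the later re-encryption pass, and scrubbed from the environment. A sketch of how a rotation is driven; the MINIO_ACCESS_KEY/MINIO_SECRET_KEY names for the new credentials are assumed from standard MinIO usage, not from this diff:

```go
// Illustrative only: environment as it would look before server start.
os.Setenv("MINIO_ACCESS_KEY_OLD", "olduser")   // config.EnvAccessKeyOld
os.Setenv("MINIO_SECRET_KEY_OLD", "oldsecret") // config.EnvSecretKeyOld
os.Setenv("MINIO_ACCESS_KEY", "newuser")
os.Setenv("MINIO_SECRET_KEY", "newsecret")
// handleCommonEnvVars() then populates globalOldCred and unsets the
// *_OLD variables so they do not leak to child processes.
```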
@@ -50,7 +50,11 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
}

func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound
}
return err
}
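With this change a missing config object surfaces as errConfigNotFound, so callers can treat deletes as idempotent, as deleteBucketTagging above already does:

```go
// Calling convention enabled by the change above.
if err := deleteConfig(ctx, objAPI, configFile); err != nil && err != errConfigNotFound {
	return err // real failure; "already gone" is ignored
}
```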
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {

@@ -17,16 +17,15 @@
package cmd

import (
"context"
"fmt"
"strings"
"sync"

"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/etcd"
xetcd "github.com/minio/minio/cmd/config/etcd"
"github.com/minio/minio/cmd/config/etcd/dns"
xldap "github.com/minio/minio/cmd/config/identity/ldap"
"github.com/minio/minio/cmd/config/identity/openid"
@@ -38,6 +37,7 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/target/http"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

func initHelp() {
@@ -49,6 +49,7 @@ func initHelp() {
config.IdentityOpenIDSubSys: openid.DefaultKVS,
config.PolicyOPASubSys: opa.DefaultKVS,
config.RegionSubSys: config.DefaultRegionKVS,
config.APISubSys: api.DefaultKVS,
config.CredentialsSubSys: config.DefaultCredentialKVS,
config.KmsVaultSubSys: crypto.DefaultVaultKVS,
config.KmsKesSubSys: crypto.DefaultKesKVS,
@@ -101,6 +102,10 @@ func initHelp() {
Key: config.KmsKesSubSys,
Description: "enable external MinIO key encryption service",
},
config.HelpKV{
Key: config.APISubSys,
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
Description: "send server logs to webhook endpoints",
@@ -175,6 +180,7 @@ func initHelp() {
var helpMap = map[string]config.HelpKVS{
"": helpSubSys, // Help for all sub-systems.
config.RegionSubSys: config.RegionHelp,
config.APISubSys: api.Help,
config.StorageClassSubSys: storageclass.Help,
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
@@ -222,6 +228,10 @@ func validateConfig(s config.Config) error {
return err
}

if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}

if globalIsXL {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount); err != nil {
@@ -251,7 +261,7 @@ func validateConfig(s config.Config) error {
}
}
{
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewCustomHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
return err
}
@@ -269,17 +279,27 @@ func validateConfig(s config.Config) error {
}

if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}

if _, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs); err != nil {
return err
{
cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
if err != nil {
return err
}
if cfg.Enabled {
conn, cerr := cfg.Connect()
if cerr != nil {
return cerr
}
conn.Close()
}
}

if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}

@@ -287,12 +307,12 @@ func validateConfig(s config.Config) error {
return err
}

return notify.TestNotificationTargets(s, GlobalServiceDoneCh, NewCustomHTTPTransport(),
return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
globalNotificationSys.ConfiguredTargetIDs())
}

func lookupConfigs(s config.Config) {
ctx := context.Background()
ctx := GlobalContext

var err error
if !globalActiveCred.IsValid() {
@@ -303,13 +323,13 @@ func lookupConfigs(s config.Config) {
}
}

etcdCfg, err := xetcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}

if etcdCfg.Enabled {
globalEtcdClient, err = xetcd.New(etcdCfg)
globalEtcdClient, err = etcd.New(etcdCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
@@ -340,6 +360,18 @@ func lookupConfigs(s config.Config) {
logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
}

apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}

apiRequestsMax := apiConfig.APIRequestsMax
if len(globalEndpoints.Hosts()) > 0 {
apiRequestsMax /= len(globalEndpoints.Hosts())
}

globalAPIThrottling.init(apiRequestsMax, apiConfig.APIRequestsDeadline)

if globalIsXL {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount)
@@ -362,7 +394,7 @@ func lookupConfigs(s config.Config) {
}
}

kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewCustomHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
}
@@ -381,13 +413,13 @@ func lookupConfigs(s config.Config) {
}

globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}

opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OPA: %w", err))
}
@@ -412,18 +444,37 @@ func lookupConfigs(s config.Config) {
for _, l := range loggerCfg.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
logger.AddTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}

for _, l := range loggerCfg.Audit {
if l.Enabled {
// Enable http audit logging
logger.AddAuditTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
logger.AddAuditTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}

globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalServiceDoneCh, NewCustomHTTPTransport())
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}

globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
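The logger target construction switches from positional arguments to functional options, which is what lets the new AuthToken field ride along without breaking every call site. A self-contained illustration of the pattern; Target and these options are local stand-ins, not the real cmd/logger/target/http API:

```go
package main

import "fmt"

type Target struct {
	endpoint  string
	authToken string
	userAgent string
}

// Option mutates a Target during construction.
type Option func(*Target)

func WithEndpoint(e string) Option  { return func(t *Target) { t.endpoint = e } }
func WithAuthToken(a string) Option { return func(t *Target) { t.authToken = a } }
func WithUserAgent(u string) Option { return func(t *Target) { t.userAgent = u } }

// New applies options over defaults; adding a new option never
// changes this signature or existing callers.
func New(opts ...Option) *Target {
	t := &Target{userAgent: "MinIO"}
	for _, opt := range opts {
		opt(t)
	}
	return t
}

func main() {
	t := New(WithEndpoint("https://logs.example.com"), WithAuthToken("secret"))
	fmt.Printf("%+v\n", *t)
}
```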
@@ -469,6 +520,20 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {

envHelp := config.HelpKVS{}
if envOnly {
// Only for sub-systems with multiple targets, list the enable ENV
// explicitly: for regular k/v the EnableKey is implicit, but for
// ENVs it cannot be made implicit.
if subSysHelp.MultipleTargets {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
}, config.EnvWordDelimiter)
envHelp = append(envHelp, config.HelpKV{
Key: envK,
Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
Optional: false,
Type: "on|off",
})
}
for _, hkv := range h {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
@@ -510,11 +575,11 @@ func newSrvConfig(objAPI ObjectLayer) error {
globalServerConfigMu.Unlock()

// Save config into file.
return saveServerConfig(context.Background(), objAPI, globalServerConfig)
return saveServerConfig(GlobalContext, objAPI, globalServerConfig)
}

func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
return readServerConfig(context.Background(), objAPI)
return readServerConfig(GlobalContext, objAPI)
}

// loadConfig - loads a new config from disk, overrides params
@@ -21,15 +21,13 @@ import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"unicode/utf8"

etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

@@ -38,37 +36,10 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
return nil
}

// If its server mode or nas gateway, migrate the backend.
doneCh := make(chan struct{})

var encrypted bool
var err error

// Migrating Config backend needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
retryTimerCh := newRetryTimerSimple(doneCh)
var stop bool
for !stop {
select {
case <-retryTimerCh:
if encrypted, err = checkBackendEncrypted(objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
close(doneCh)
return err
}
stop = true
case <-globalOSSignalCh:
close(doneCh)
return fmt.Errorf("Config encryption process stopped gracefully")
}
encrypted, err := checkBackendEncrypted(objAPI)
if err != nil {
return fmt.Errorf("Unable to encrypt config %w", err)
}
close(doneCh)

if encrypted {
// backend is encrypted, but credentials are not specified
@@ -84,42 +55,16 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
return nil
}
if !globalActiveCred.IsValid() {
return errInvalidArgument
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
}

activeCredOld, err := getOldCreds()
if err != nil {
return err
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, globalOldCred, encrypted); err != nil {
return fmt.Errorf("Unable to migrate all config at .minio.sys/config/: %w", err)
}

doneCh = make(chan struct{})
defer close(doneCh)

retryTimerCh = newRetryTimerSimple(doneCh)

// Migrating Config backend needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
for {
select {
case <-retryTimerCh:
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, activeCredOld, encrypted); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Config encryption process stopped gracefully")
}
}
return nil
}

const (
@@ -140,7 +85,7 @@ func checkBackendEtcdEncrypted(ctx context.Context, client *etcd.Client) (bool,
}

func checkBackendEncrypted(objAPI ObjectLayer) (bool, error) {
data, err := readConfig(context.Background(), objAPI, backendEncryptedFile)
data, err := readConfig(GlobalContext, objAPI, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
@@ -164,25 +109,7 @@ func decryptData(edata []byte, creds ...auth.Credentials) ([]byte, error) {
return data, err
}

func getOldCreds() (activeCredOld auth.Credentials, err error) {
accessKeyOld := env.Get(config.EnvAccessKeyOld, "")
secretKeyOld := env.Get(config.EnvSecretKeyOld, "")
if accessKeyOld != "" && secretKeyOld != "" {
activeCredOld, err = auth.CreateCredentials(accessKeyOld, secretKeyOld)
if err != nil {
return activeCredOld, err
}
// Once we have obtained the rotating creds
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
return activeCredOld, nil
}

func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
defer cancel()

func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client) error {
encrypted, err := checkBackendEtcdEncrypted(ctx, client)
if err != nil {
return err
@@ -206,20 +133,15 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
}
}

activeCredOld, err := getOldCreds()
if err != nil {
return err
}

if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !activeCredOld.IsValid() {
if !globalOldCred.IsValid() {
return nil
}

// No real reason to rotate if old and new creds are same.
if activeCredOld.Equal(globalActiveCred) {
if globalOldCred.Equal(globalActiveCred) {
return nil
}

@@ -228,7 +150,10 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
logger.Info("Attempting encryption of all IAM users and policies on etcd")
}

r, err := client.Get(ctx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
listCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()

r, err := client.Get(listCtx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
if err != nil {
return err
}
@@ -254,8 +179,8 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {

var data []byte
// Is rotating of creds requested?
if activeCredOld.IsValid() {
data, err = decryptData(cdata, activeCredOld, globalActiveCred)
if globalOldCred.IsValid() {
data, err = decryptData(cdata, globalOldCred, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
@@ -267,7 +192,12 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
}

if !utf8.Valid(data) {
return errors.New("config data not in plain-text form")
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}

cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
@@ -279,9 +209,11 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
return err
}
}
if encrypted && globalActiveCred.IsValid() {

if encrypted && globalActiveCred.IsValid() && globalOldCred.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}

return saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationComplete)
}
@@ -18,7 +18,6 @@ package cmd

import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
@@ -2430,7 +2429,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
defer func() {
if err == nil {
if globalEtcdClient != nil {
deleteKeyEtcd(context.Background(), globalEtcdClient, configFile)
deleteKeyEtcd(GlobalContext, globalEtcdClient, configFile)
} else {
// Rename config.json to config.json.deprecated only upon
// success of this function.
@@ -2440,7 +2439,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
}()

// Verify if backend already has the file (after holding lock)
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
if err = checkConfig(GlobalContext, objAPI, configFile); err != errConfigNotFound {
return err
} // if errConfigNotFound proceed to migrate..

@@ -2466,7 +2465,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
// Initialize the server config, if no config exists.
return newSrvConfig(objAPI)
}
return saveServerConfig(context.Background(), objAPI, config)
return saveServerConfig(GlobalContext, objAPI, config)
}

// Migrates '.minio.sys/config.json' to v33.
@@ -2502,7 +2501,7 @@ func migrateMinioSysConfig(objAPI ObjectLayer) error {
}

func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
data, err := readConfig(context.Background(), objAPI, configFile)
data, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
return false, nil, err
}
@@ -2548,7 +2547,7 @@ func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
cfg.Version = "28"
cfg.KMS = crypto.KMSConfig{}

if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %w", err)
}

@@ -2575,7 +2574,7 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
}

cfg.Version = "29"
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from ‘28’ to ‘29’. %w", err)
}

@@ -2607,7 +2606,7 @@ func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
cfg.Compression.Extensions = strings.Split(compress.DefaultExtensions, config.ValueSeparator)
cfg.Compression.MimeTypes = strings.Split(compress.DefaultMimeTypes, config.ValueSeparator)

if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from ‘29’ to ‘30’. %w", err)
}

@@ -2642,7 +2641,7 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
AuthToken: "",
}

if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from ‘30’ to ‘31’. %w", err)
}

@@ -2672,7 +2671,7 @@ func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
cfg.Notify.NSQ = make(map[string]target.NSQArgs)
cfg.Notify.NSQ["1"] = target.NSQArgs{}

if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from ‘31’ to ‘32’. %w", err)
}

@@ -2700,7 +2699,7 @@ func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {

cfg.Version = "33"

if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from '32' to '33'. %w", err)
}

@@ -2777,7 +2776,7 @@ func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
notify.SetNotifyWebhook(newCfg, k, args)
}

if err = saveServerConfig(context.Background(), objAPI, newCfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, newCfg); err != nil {
return err
}

@@ -20,14 +20,13 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"path"
"sort"
"strings"
"time"

jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)

@@ -41,9 +40,6 @@ const (

// MinIO configuration file.
minioConfigFile = "config.json"

// MinIO configuration backup file
minioConfigBackupFile = minioConfigFile + ".backup"
)

func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
@@ -170,6 +166,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
}

var config = config.New()
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(configData, &config); err != nil {
return nil, err
}
@@ -186,16 +183,14 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
}

// WatchConfigNASDisk - watches the NAS disk on a periodic basis.
func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) {
func (sys *ConfigSys) WatchConfigNASDisk(ctx context.Context, objAPI ObjectLayer) {
configInterval := globalRefreshIAMInterval
watchDisk := func() {
ticker := time.NewTicker(configInterval)
defer ticker.Stop()
for {
select {
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
case <-ticker.C:
case <-time.After(configInterval):
loadConfig(objAPI)
}
}
@@ -210,33 +205,7 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
return errInvalidArgument
}

doneCh := make(chan struct{})
defer close(doneCh)

// Initializing configuration needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
// - Write quorum not met when upgrading configuration
// version is needed.
retryTimerCh := newRetryTimerSimple(doneCh)
for {
select {
case <-retryTimerCh:
if err := initConfig(objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for configuration to be initialized..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Initializing config sub-system gracefully stopped")
}
}
return initConfig(objAPI)
}

// NewConfigSys - creates new config system object.
91 cmd/config/api/api.go Normal file
@@ -0,0 +1,91 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package api

import (
"encoding/json"
"errors"
"strconv"
"time"

"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
)

const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
)

// DefaultKVS - default API config
var (
DefaultKVS = config.KVS{
config.KV{
Key: apiRequestsMax,
Value: "0",
},
config.KV{
Key: apiRequestsDeadline,
Value: "10s",
},
}
)

// Config API configuration
type Config struct {
APIRequestsMax int `json:"requests_max"`
APIRequestsDeadline time.Duration `json:"requests_deadline"`
}

// UnmarshalJSON - custom JSON unmarshalling for the API config.
func (sCfg *Config) UnmarshalJSON(data []byte) error {
type Alias Config
aux := &struct {
*Alias
}{
Alias: (*Alias)(sCfg),
}
return json.Unmarshal(data, &aux)
}

// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}

// Check environment variables parameters
requestsMax, err := strconv.Atoi(env.Get(config.EnvAPIRequestsMax, kvs.Get(apiRequestsMax)))
if err != nil {
return cfg, err
}

if requestsMax < 0 {
return cfg, errors.New("invalid API max requests value")
}

requestsDeadline, err := time.ParseDuration(env.Get(config.EnvAPIRequestsDeadline, kvs.Get(apiRequestsDeadline)))
if err != nil {
return cfg, err
}

cfg = Config{
APIRequestsMax: requestsMax,
APIRequestsDeadline: requestsDeadline,
}
return cfg, nil
}
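A usage sketch of LookupConfig with explicit KVS values; when set, the environment variables behind config.EnvAPIRequestsMax and config.EnvAPIRequestsDeadline (presumably MINIO_API_REQUESTS_MAX and MINIO_API_REQUESTS_DEADLINE, though this diff does not spell them out) take precedence:

```go
// Sketch, from within the api package; fmt and log imports assumed.
kvs := config.KVS{
	config.KV{Key: apiRequestsMax, Value: "1600"},
	config.KV{Key: apiRequestsDeadline, Value: "1m"},
}
cfg, err := LookupConfig(kvs)
if err != nil {
	log.Fatal(err) // e.g. "invalid API max requests value" for a negative count
}
fmt.Println(cfg.APIRequestsMax, cfg.APIRequestsDeadline) // 1600 1m0s
```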
37 cmd/config/api/help.go Normal file
@@ -0,0 +1,37 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package api

import "github.com/minio/minio/cmd/config"

// Help template for the api sub-system.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: apiRequestsMax,
Description: `set the maximum number of concurrent requests, e.g. "1600"`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: apiRequestsDeadline,
Description: `set the deadline for API requests waiting to be processed, e.g. "1m"`,
Optional: true,
Type: "duration",
},
}
)
28 cmd/config/cache/config.go vendored
@@ -28,12 +28,15 @@ import (
// Config represents cache config settings
type Config struct {
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
After int `json:"after"`
WatermarkLow int `json:"watermark_low"`
WatermarkHigh int `json:"watermark_high"`
}

// UnmarshalJSON - implements JSON unmarshal interface for unmarshalling
@@ -60,7 +63,18 @@ func (cfg *Config) UnmarshalJSON(data []byte) (err error) {
if _cfg.Quota < 0 {
return errors.New("config quota value should not be null or negative")
}

if _cfg.After < 0 {
return errors.New("cache after value should not be less than 0")
}
if _cfg.WatermarkLow < 0 || _cfg.WatermarkLow > 100 {
return errors.New("config low watermark value should be between 0 and 100")
}
if _cfg.WatermarkHigh < 0 || _cfg.WatermarkHigh > 100 {
return errors.New("config high watermark value should be between 0 and 100")
}
if _cfg.WatermarkLow > 0 && (_cfg.WatermarkLow >= _cfg.WatermarkHigh) {
return errors.New("config low watermark value should be less than high watermark")
}
return nil
}
18 cmd/config/cache/help.go vendored
@@ -50,5 +50,23 @@ var (
Optional: true,
Type: "sentence",
},
config.HelpKV{
Key: After,
Description: `minimum accesses before caching an object`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: WatermarkLow,
Description: `% of cache use at which to stop cache eviction`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: WatermarkHigh,
Description: `% of cache use at which to start cache eviction`,
Optional: true,
Type: "number",
},
}
)
91 cmd/config/cache/lookup.go vendored
@@ -26,21 +26,31 @@ import (
// Cache ENVs
const (
Drives = "drives"
Exclude = "exclude"
Expiry = "expiry"
MaxUse = "maxuse"
Quota = "quota"
Drives = "drives"
Exclude = "exclude"
Expiry = "expiry"
MaxUse = "maxuse"
Quota = "quota"
After = "after"
WatermarkLow = "watermark_low"
WatermarkHigh = "watermark_high"

EnvCacheDrives = "MINIO_CACHE_DRIVES"
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
EnvCacheExpiry = "MINIO_CACHE_EXPIRY"
EnvCacheMaxUse = "MINIO_CACHE_MAXUSE"
EnvCacheQuota = "MINIO_CACHE_QUOTA"
EnvCacheAfter = "MINIO_CACHE_AFTER"
EnvCacheWatermarkLow = "MINIO_CACHE_WATERMARK_LOW"
EnvCacheWatermarkHigh = "MINIO_CACHE_WATERMARK_HIGH"

EnvCacheDrives = "MINIO_CACHE_DRIVES"
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
EnvCacheExpiry = "MINIO_CACHE_EXPIRY"
EnvCacheMaxUse = "MINIO_CACHE_MAXUSE"
EnvCacheQuota = "MINIO_CACHE_QUOTA"
EnvCacheEncryptionMasterKey = "MINIO_CACHE_ENCRYPTION_MASTER_KEY"

DefaultExpiry = "90"
DefaultQuota = "80"
DefaultExpiry = "90"
DefaultQuota = "80"
DefaultAfter = "0"
DefaultWaterMarkLow = "70"
DefaultWaterMarkHigh = "80"
)

// DefaultKVS - default KV settings for caching.
@@ -62,6 +72,18 @@ var (
Key: Quota,
Value: DefaultQuota,
},
config.KV{
Key: After,
Value: DefaultAfter,
},
config.KV{
Key: WatermarkLow,
Value: DefaultWaterMarkLow,
},
config.KV{
Key: WatermarkHigh,
Value: DefaultWaterMarkHigh,
},
}
)

@@ -79,7 +101,6 @@ func Enabled(kvs config.KVS) bool {
// variables and merge them with provided CacheConfiguration.
func LookupConfig(kvs config.KVS) (Config, error) {
cfg := Config{}

if err := config.CheckValidKeys(config.CacheSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
@@ -121,9 +142,7 @@ func LookupConfig(kvs config.KVS) (Config, error) {
return cfg, config.ErrInvalidCacheQuota(err)
}
cfg.Quota = cfg.MaxUse
}

if quotaStr := env.Get(EnvCacheQuota, kvs.Get(Quota)); quotaStr != "" {
} else if quotaStr := env.Get(EnvCacheQuota, kvs.Get(Quota)); quotaStr != "" {
cfg.Quota, err = strconv.Atoi(quotaStr)
if err != nil {
return cfg, config.ErrInvalidCacheQuota(err)
@@ -136,5 +155,45 @@ func LookupConfig(kvs config.KVS) (Config, error) {
cfg.MaxUse = cfg.Quota
}

if afterStr := env.Get(EnvCacheAfter, kvs.Get(After)); afterStr != "" {
cfg.After, err = strconv.Atoi(afterStr)
if err != nil {
return cfg, config.ErrInvalidCacheAfter(err)
}
// after should be a valid value >= 0.
if cfg.After < 0 {
err := errors.New("cache after value cannot be less than 0")
return cfg, config.ErrInvalidCacheAfter(err)
}
}

if lowWMStr := env.Get(EnvCacheWatermarkLow, kvs.Get(WatermarkLow)); lowWMStr != "" {
cfg.WatermarkLow, err = strconv.Atoi(lowWMStr)
if err != nil {
return cfg, config.ErrInvalidCacheWatermarkLow(err)
}
// WatermarkLow should be a valid percentage.
if cfg.WatermarkLow < 0 || cfg.WatermarkLow > 100 {
err := errors.New("config min watermark value should be between 0 and 100")
return cfg, config.ErrInvalidCacheWatermarkLow(err)
}
}

if highWMStr := env.Get(EnvCacheWatermarkHigh, kvs.Get(WatermarkHigh)); highWMStr != "" {
cfg.WatermarkHigh, err = strconv.Atoi(highWMStr)
if err != nil {
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}

// WatermarkHigh should be a valid percentage.
if cfg.WatermarkHigh < 0 || cfg.WatermarkHigh > 100 {
err := errors.New("config high watermark value should be between 0 and 100")
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
}
if cfg.WatermarkLow > cfg.WatermarkHigh {
err := errors.New("config high watermark value should be greater than low watermark value")
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
return cfg, nil
}
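A sketch of driving the new knobs through the environment, using exactly the names defined in the const block above; the values are illustrative, and cache drives are assumed to be configured as usual via MINIO_CACHE_DRIVES:

```go
// Sketch, from within the cache package; os, fmt and log imports assumed.
os.Setenv("MINIO_CACHE_AFTER", "3")           // cache an object on its 3rd access
os.Setenv("MINIO_CACHE_WATERMARK_LOW", "70")  // stop eviction at 70% cache use
os.Setenv("MINIO_CACHE_WATERMARK_HIGH", "90") // start eviction at 90% cache use
cfg, err := LookupConfig(DefaultKVS)
if err != nil {
	log.Fatal(err) // e.g. a watermark outside 0-100
}
fmt.Println(cfg.After, cfg.WatermarkLow, cfg.WatermarkHigh) // 3 70 90
```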
92 cmd/config/certsinfo.go Normal file
@@ -0,0 +1,92 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package config

import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net/http"
"strings"

color "github.com/minio/minio/pkg/color"
)

// Extra ASN1 OIDs that we may need to handle
var (
oidEmailAddress = []int{1, 2, 840, 113549, 1, 9, 1}
)

// printName prints the fields of a distinguished name, which include such
// things as its common name and locality.
func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []string {
values := []string{}
for _, name := range names {
oid := name.Type
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
switch oid[3] {
case 3:
values = append(values, fmt.Sprintf("CN=%s", name.Value))
case 6:
values = append(values, fmt.Sprintf("C=%s", name.Value))
case 8:
values = append(values, fmt.Sprintf("ST=%s", name.Value))
case 10:
values = append(values, fmt.Sprintf("O=%s", name.Value))
case 11:
values = append(values, fmt.Sprintf("OU=%s", name.Value))
default:
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
} else if oid.Equal(oidEmailAddress) {
values = append(values, fmt.Sprintf("emailAddress=%s", name.Value))
} else {
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
}
if len(values) > 0 {
buf.WriteString(values[0])
for i := 1; i < len(values); i++ {
buf.WriteString(", " + values[i])
}
buf.WriteString("\n")
}
return values
}

// CertificateText returns a human-readable string representation
// of the certificate cert. The format is similar to the OpenSSL
// way of printing certificates (not identical).
func CertificateText(cert *x509.Certificate) string {
var buf strings.Builder

buf.WriteString(color.Blue("\nCertificate:\n"))
if cert.SignatureAlgorithm != x509.UnknownSignatureAlgorithm {
buf.WriteString(color.Blue("%4sSignature Algorithm: ", "") + color.Bold(fmt.Sprintf("%s\n", cert.SignatureAlgorithm)))
}

// Issuer information
buf.WriteString(color.Blue("%4sIssuer: ", ""))
printName(cert.Issuer.Names, &buf)

// Validity information
buf.WriteString(color.Blue("%4sValidity\n", ""))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot Before: %s\n", "", cert.NotBefore.Format(http.TimeFormat))))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot After : %s\n", "", cert.NotAfter.Format(http.TimeFormat))))

return buf.String()
}
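CertificateText only needs a parsed x509.Certificate, so it can be exercised with a throwaway self-signed certificate; everything below besides config.CertificateText is standard library, and the subject and one-hour validity are arbitrary:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"

	"github.com/minio/minio/cmd/config"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "localhost", Organization: []string{"MinIO Test"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	// Self-signed: the template doubles as the parent certificate.
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}
	fmt.Print(config.CertificateText(cert)) // signature algorithm, issuer, validity
}
```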
@@ -24,7 +24,7 @@ import (
"regexp"
"strings"

"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
@@ -70,6 +70,7 @@ const (
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storage_class"
APISubSys = "api"
CompressionSubSys = "compression"
KmsVaultSubSys = "kms_vault"
KmsKesSubSys = "kms_kes"
@@ -101,6 +102,7 @@ var SubSystems = set.CreateStringSet([]string{
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
@@ -128,6 +130,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
@@ -196,6 +199,23 @@ func (kvs KVS) Empty() bool {
return len(kvs) == 0
}

// Keys returns the list of keys for the current KVS
func (kvs KVS) Keys() []string {
var keys = make([]string, len(kvs))
var foundComment bool
for i := range kvs {
if kvs[i].Key == madmin.CommentKey {
foundComment = true
}
keys[i] = kvs[i].Key
}
// Comment KV not found, add it explicitly.
if !foundComment {
keys = append(keys, madmin.CommentKey)
}
return keys
}

func (kvs KVS) String() string {
var s strings.Builder
for _, kv := range kvs {
@@ -581,9 +601,20 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
return Errorf("sub-system '%s' only supports single target", subSystemValue[0])
}

tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}

fields := madmin.KvFields(inputs[1], defaultKVS[subSys].Keys())
if len(fields) == 0 {
return Errorf("sub-system '%s' cannot have empty keys", subSys)
}

var kvs = KVS{}
var prevK string
for _, v := range strings.Fields(inputs[1]) {
for _, v := range fields {
kv := strings.SplitN(v, KvSeparator, 2)
if len(kv) == 0 {
continue
@@ -604,12 +635,6 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
return Errorf("key '%s', cannot have empty value", kv[0])
}

tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}

_, ok := kvs.Lookup(Enable)
// Check if state is required
_, enableRequired := defaultKVS[subSys].Lookup(Enable)
@@ -621,6 +646,12 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
currKVS, ok := c[subSys][tgt]
if !ok {
currKVS = defaultKVS[subSys]
} else {
for _, kv := range defaultKVS[subSys] {
if _, ok = currKVS.Lookup(kv.Key); !ok {
currKVS.Set(kv.Key, kv.Value)
}
}
}

for _, kv := range kvs {
Some files were not shown because too many files have changed in this diff.