Compare commits
224 Commits
RELEASE.20...RELEASE.20
SHA1:
4c81201f95, cd8d511d3d, 899a2fa1c7, 17e17da00d, a5da9120f3, aa12d75d75, dd4a2d7419, 6fcbdd5607,
3831cc9e3b, 230fc0d186, 7f9498f43f, 1cf322b7d4, 3168e93730, 0b1c824618, c851e022b7, 6f45e303f5,
5ad032826a, 84bf4624a4, dff37aa33d, d12831eb07, 4a36cd7035, 00555c747e, 03490c811b, 48d2c03250,
ed78854cea, e60834838f, d616d8a857, 24cab7f9df, b2536476c9, 02c1a08a5b, 5c47ce456e, 8ea55f9dba,
80e3dce631, 80fab03b63, 730d2dc7be, 0ee9678190, 34859c6d4b, b1c99e88ac, 0104af6bcc, 224daee391,
34ea1d2167, 9d95937018, 74a7889a3e, fa01e640f5, f355374962, bda0fe3150, b70995dd60, 4b6264da7d,
48919de301, eb3ded420e, eb2934f0c1, b7438fe4e6, ce6cef6855, a966ccd17d, 493c714663, e959c5d71c,
4a2928eb49, af88772a78, 1dce6918c2, 9109148474, eaaf05a7cc, 52e21bc853, 16e1a25bc0, 958661cbb5,
6019628f7d, 0987069e37, 6a0372be6c, c13afd56e8, 96997d2b21, 86a3319d41, a694ba93d9, 9f60e84ce1,
a9aaea0d67, 572b1721b2, b0e1d4ce78, eb19c8af40, 746f1585eb, 2d58a8d861, 0037951b6e, fbd1c5f51a,
1c6781757c, b4e3956e69, c51229493b, 631d55aa22, 8a291e1dc0, 650dccfa9e, d08b4b147d, 9a703befe6,
9a1615768d, 37da0c647e, 2acb530ccd, 3e1fb17b70, a89d6b8e3d, 1c085f7d1a, 4b6585d249, 9ffad7fceb,
18725679c4, ba8a8ad818, 102ad60dee, cb61e50b51, 859ef52886, f04a1f220c, cd380251b3, 92cd1eed45,
db32a24cb6, 2d96940826, e730da1438, 46ee8659b4, 73a6b4ea11, c1b88c17cc, a359e36e35, 0a2e6d58a5,
7e80afdd7f, 1b119557c2, 7778fef6bb, ea1803417f, ea5094e842, 5a974fb10c, 9acdeab73d, d19b434ffc,
17a1eda702, 7d50a0cfea, ceff7bcca5, 7d1734d033, 03ec6adfd0, 309b10f201, 2a8e40f19f, 5f7bd2b1da,
c097ce9c32, caad314faa, bc2ebe0021, 21e8440423, 11aa393ba7, d0c910a6f3, 81c90ae430, d15a5ad4cc,
0ff246653b, 113bcbdb78, 95411228db, 23774353b7, 052b5262ff, a2a5ec93d3, 331c517a5b, 27a774cbe9,
8e6787a302, 59352d0ac2, 75d44b3bae, 95ae6c4b49, 98ca770f81, ccd967e3be, 0ebb73ee2e, c8b84a0e9e,
3acb5cff45, ab801ad3d4, 74116204ce, 2eb5f934d8, b43d376a87, e4a44f6224, 0272973175, adca28801d,
d2a3f92452, ede86845e5, e57c742674, bb5976d727, 670724184c, f7c1a59de1, 01a2ccc52f, 51ba1dac49,
a4463dd40f, 83a82d818e, 1d1c4430b2, 4e00b47b52, 43e6d1ce2d, 30da442a85, 038d91feaa, e7ba78beee,
ab43804efd, 1c865dd119, b32d0a5b60, 79e21601b0, 34253aa595, 79ed7ce451, 900eebb9a4, 6914b2c99d,
0dd3a08169, f8f290e848, 9179cdfc9d, 76b6dc0112, ce303f5c7e, b4b7a18497, 1e2ebc9945, a49e3647b6,
954e17c3d0, 2a9819aff8, 8049184dcc, 6c6137b2e7, 19c4f3082b, 433c2831ae, 9138b2b503, 6d64aab420,
77509ce391, adcaa6f9de, ce129efa09, 121164db56, 195f95196e, a20d4568a2, e656beb915, cd04600862,
3acc0ebb81, 88daaef76b, 1cdaced8b6, 0b8255529a, 019fe69a57, d90ab904e7, 6ae30b21c9, b16781846e,
5ce82b45da, e99bc177c0, b68bc75dad, d61eac080b, d8be9f12a2, 0cf4539fe8, db6bba709a, 2174a22835
.github/stale.yml (1 change, vendored)
@@ -14,6 +14,7 @@ onlyLabels: []
exemptLabels:
  - "security"
  - "pending discussion"
  - "do not close"

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
.github/workflows/go.yml (6 changes, vendored)
@@ -4,7 +4,6 @@ on:
pull_request:
  branches:
  - master
  - release

jobs:
  build:
@@ -12,7 +11,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
  matrix:
-   go-version: [1.13.x]
+   go-version: [1.14.x, 1.15.x]
    os: [ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v2
@@ -39,6 +38,9 @@ jobs:
MINIO_CI_CD: 1
run: |
  sudo apt-get install devscripts shellcheck
+ nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
+ curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
+ go list -m all | ./nancy sleuth
  make
  diff -au <(gofmt -s -d cmd) <(printf "")
  diff -au <(gofmt -s -d pkg) <(printf "")
.nancy-ignore (new file, 6 lines)
@@ -0,0 +1,6 @@
+CVE-2018-17075
+CVE-2018-17848
+CVE-2018-17846
+CVE-2018-17847
+CVE-2018-17143
+CVE-2018-17142
CREDITS (231 changes)
@@ -10293,25 +10293,6 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice
the Mozilla Public License, v. 2.0.


================================================================

-github.com/inconshreveable/go-update
-https://github.com/inconshreveable/go-update
-----------------------------------------------------------------
-Copyright 2015 Alan Shreve
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-================================================================
-
github.com/jcmturner/gofork
@@ -11470,8 +11451,216 @@ https://github.com/minio/md5-simd

================================================================

-github.com/minio/minio-go/v6
-https://github.com/minio/minio-go/v6
+github.com/minio/minio-go/v7
+https://github.com/minio/minio-go/v7
----------------------------------------------------------------

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

================================================================

github.com/minio/selfupdate
https://github.com/minio/selfupdate
----------------------------------------------------------------

Apache License
@@ -1,4 +1,4 @@
-FROM golang:1.13-alpine as builder
+FROM golang:1.14-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

@@ -23,8 +23,7 @@ RUN \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
-   curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS && \
-   /usr/bin/verify-minio.sh
+   curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS

EXPOSE 9000

@@ -23,7 +23,7 @@ RUN \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
-   curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS && \
+   curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000

@@ -22,7 +22,7 @@ RUN \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
-   curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS && \
+   curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000

@@ -20,7 +20,7 @@ RUN \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
-   curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS && \
+   curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000

@@ -22,7 +22,7 @@ RUN \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
-   curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS && \
+   curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000
Makefile (7 changes)
@@ -17,11 +17,12 @@ checks:
getdeps:
	@mkdir -p ${GOPATH}/bin
	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
+	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)

-verifiers: getdeps fmt lint
+verifiers: getdeps fmt lint ruleguard

fmt:
	@echo "Running $@ check"
@@ -33,6 +34,10 @@ lint:
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

+ruleguard:
+	@echo "Running $@ check"
+	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
+
# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
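The new `ruleguard` verifier runs the rules in `ruleguard.rules.go` against the whole module. For readers unfamiliar with quasilyte/go-ruleguard, a minimal rules file looks roughly like the sketch below; the `dsl` import path and the rule itself are illustrative assumptions, not the contents of MinIO's actual `ruleguard.rules.go`:

```go
//go:build ignore

package gorules

import "github.com/quasilyte/go-ruleguard/dsl"

// Illustrative rule only (not one of MinIO's): flag regexp.MustCompile
// calls on constant patterns, suggesting the compiled regexp be hoisted
// to a package-level variable instead of being rebuilt on every call.
func constRegexpCompile(m dsl.Matcher) {
	m.Match(`regexp.MustCompile($pat)`).
		Where(m["pat"].Const).
		Report(`consider hoisting this regexp to a package-level variable`)
}
```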
@@ -88,7 +88,7 @@ service minio start
```

## Install from Source
-Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
+Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.14](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
@@ -177,7 +177,7 @@ mc admin update <minio alias, e.g., myminio>
- `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
- `mc admin update` is disabled in kubernetes/container environments, container environments provide their own mechanisms to rollout of updates.
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
-- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)x
+- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md

@@ -193,4 +193,4 @@ mc admin update <minio alias, e.g., myminio>
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)

## License
-Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](./LICENSE).
+Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).

@@ -89,7 +89,7 @@ service minio start

## 使用源码安装

-采用源码安装仅供开发人员和高级用户使用,如果你还没有Golang环境, 请参考 [How to install Golang](https://golang.org/doc/install)。最低需要Golang版本为 [go1.13](https://golang.org/dl/#stable)
+采用源码安装仅供开发人员和高级用户使用,如果你还没有Golang环境, 请参考 [How to install Golang](https://golang.org/doc/install)。最低需要Golang版本为 [go1.14](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
    const { fetchServerInfo } = this.props
    fetchServerInfo()
  }
- fullScreen(e) {
-   e.preventDefault()
-   let el = document.documentElement
-   if (el.requestFullscreen) {
-     el.requestFullscreen()
-   }
-   if (el.mozRequestFullScreen) {
-     el.mozRequestFullScreen()
-   }
-   if (el.webkitRequestFullscreen) {
-     el.webkitRequestFullscreen()
-   }
-   if (el.msRequestFullscreen) {
-     el.msRequestFullscreen()
-   }
- }
  logout(e) {
    e.preventDefault()
    web.Logout()
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
  <i className="fas fa-bars" />
</Dropdown.Toggle>
<Dropdown.Menu className="dropdown-menu-right">
+ <li>
+   <a href="" onClick={this.showChangePassword.bind(this)}>
+     Change Password <i className="fas fa-cog" />
+   </a>
+   {this.state.showChangePasswordModal && (
+     <ChangePasswordModal
+       serverInfo={serverInfo}
+       hideChangePassword={this.hideChangePassword.bind(this)}
+     />
+   )}
+ </li>
+ <li>
+   <a target="_blank" href="https://docs.min.io/?ref=ob">
+     Documentation <i className="fas fa-book" />
+   </a>
+ </li>
  <li>
    <a target="_blank" href="https://github.com/minio/minio">
      GitHub <i className="fab fa-github" />
    </a>
  </li>
- <li>
-   <a href="" onClick={this.fullScreen}>
-     Fullscreen <i className="fas fa-expand" />
-   </a>
- </li>
- <li>
-   <a target="_blank" href="https://docs.min.io/">
-     Documentation <i className="fas fa-book" />
-   </a>
- </li>
  <li>
-   <a target="_blank" href="https://slack.min.io">
-     Ask for help <i className="fas fa-question-circle" />
+   <a target="_blank" href="https://min.io/pricing?ref=ob">
+     Get Support <i className="fas fa-question-circle" />
    </a>
  </li>
  <li>
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
      />
    )}
  </li>
- <li>
-   <a href="" onClick={this.showChangePassword.bind(this)}>
-     Change Password <i className="fas fa-cog" />
-   </a>
-   {this.state.showChangePasswordModal && (
-     <ChangePasswordModal
-       serverInfo={serverInfo}
-       hideChangePassword={this.hideChangePassword.bind(this)}
-     />
-   )}
- </li>
  <li>
    <a href="" id="logout" onClick={this.logout}>
-     Sign Out <i className="fas fa-sign-out-alt" />
+     Logout <i className="fas fa-sign-out-alt" />
    </a>
  </li>
</Dropdown.Menu>
@@ -15,6 +15,7 @@
*/

import React from "react"
+import ObjectsSearch from "../objects/ObjectsSearch"
import Path from "../objects/Path"
import StorageInfo from "./StorageInfo"
import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
<header className="fe-header">
  <Path />
  {loggedIn && <StorageInfo />}
+ {loggedIn && <ObjectsSearch />}
  <ul className="feh-actions">
    {loggedIn ? (
      <BrowserDropdown />
@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
export const getFilteredBuckets = createSelector(
  bucketsSelector,
  bucketsFilterSelector,
- (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
+ (buckets, filter) => buckets.filter(
+   bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
)

export const getCurrentBucket = state => state.buckets.currentBucket
@@ -18,6 +18,7 @@ import React from "react"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import ObjectsList from "./ObjectsList"
+import { getFilteredObjects } from "./selectors"

export class ObjectsListContainer extends React.Component {
  constructor(props) {
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
      })
    }
  }
+ componentDidUpdate(prevProps) {
+   if (this.props.filter !== prevProps.filter) {
+     this.setState({
+       page: 1
+     })
+   }
+ }
  loadNextPage() {
    this.setState(state => {
      return { page: state.page + 1 }
    })
  }
  render() {
-   const { objects, listLoading } = this.props
+   const { filteredObjects, listLoading } = this.props

-   const visibleObjects = objects.slice(0, this.state.page * 100)
+   const visibleObjects = filteredObjects.slice(0, this.state.page * 100)

    return (
      <div style={{ position: "relative" }}>
        <InfiniteScroll
          pageStart={0}
          loadMore={this.loadNextPage}
-         hasMore={objects.length > visibleObjects.length}
+         hasMore={filteredObjects.length > visibleObjects.length}
          useWindow={true}
          initialLoad={false}
        >
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
  return {
    currentBucket: state.buckets.currentBucket,
    currentPrefix: state.objects.currentPrefix,
-   objects: state.objects.list,
+   filteredObjects: getFilteredObjects(state),
+   filter: state.objects.filter,
    sortBy: state.objects.sortBy,
    sortOrder: state.objects.sortOrder,
    listLoading: state.objects.listLoading
browser/app/js/objects/ObjectsSearch.js (new file, 43 lines)
@@ -0,0 +1,43 @@
+/*
+ * MinIO Cloud Storage (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { connect } from "react-redux"
+import * as actionsObjects from "./actions"
+
+export const ObjectsSearch = ({ onChange }) => (
+  <div
+    className="input-group ig-left ig-search-dark"
+    style={{ display: "block" }}
+  >
+    <input
+      className="ig-text"
+      type="input"
+      placeholder="Search Objects..."
+      onChange={e => onChange(e.target.value)}
+    />
+    <i className="ig-helpers" />
+  </div>
+)
+
+const mapDispatchToProps = dispatch => {
+  return {
+    onChange: filter =>
+      dispatch(actionsObjects.setFilter(filter))
+  }
+}
+
+export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"

describe("ObjectsList", () => {
  it("should render without crashing", () => {
-   shallow(<ObjectsListContainer objects={[]} />)
+   shallow(<ObjectsListContainer filteredObjects={[]} />)
  })

  it("should render ObjectsList with objects", () => {
    const wrapper = shallow(
      <ObjectsListContainer
-       objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
+       filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
      />
    )
    expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
    const wrapper = shallow(
      <ObjectsListContainer
        currentBucket="test1"
-       objects={[]}
+       filteredObjects={[]}
        listLoading={true}
      />
    )
browser/app/js/objects/__tests__/ObjectsSearch.test.js (new file, 32 lines)
@@ -0,0 +1,32 @@
+/*
+ * MinIO Cloud Storage (C) 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { shallow } from "enzyme"
+import { ObjectsSearch } from "../ObjectsSearch"
+
+describe("ObjectsSearch", () => {
+  it("should render without crashing", () => {
+    shallow(<ObjectsSearch />)
+  })
+
+  it("should call onChange with search text", () => {
+    const onChange = jest.fn()
+    const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
+    wrapper.find("input").simulate("change", { target: { value: "test" } })
+    expect(onChange).toHaveBeenCalledWith("test")
+  })
+})
@@ -23,6 +23,7 @@ describe("objects reducer", () => {
  const initialState = reducer(undefined, {})
  expect(initialState).toEqual({
    list: [],
+   filter: "",
    listLoading: false,
    sortBy: "",
    sortOrder: SORT_ORDER_ASC,
@@ -36,6 +36,7 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
+export const SET_FILTER = "objects/SET_FILTER"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -57,6 +58,13 @@ export const resetList = () => ({
  type: RESET_LIST,
})

+export const setFilter = filter => {
+  return {
+    type: SET_FILTER,
+    filter
+  }
+}
+
export const setListLoading = (listLoading) => ({
  type: SET_LIST_LOADING,
  listLoading,
@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
export default (
  state = {
    list: [],
+   filter: "",
    listLoading: false,
    sortBy: "",
    sortOrder: SORT_ORDER_ASC,
@@ -53,6 +54,11 @@ export default (
      ...state,
      list: []
    }
+   case actionsObjects.SET_FILTER:
+     return {
+       ...state,
+       filter: action.filter
+     }
    case actionsObjects.SET_LIST_LOADING:
      return {
        ...state,
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
export const getCheckedList = state => state.objects.checkedList

export const getPrefixWritable = state => state.objects.prefixWritable

+const objectsSelector = state => state.objects.list
+const objectsFilterSelector = state => state.objects.filter
+
+export const getFilteredObjects = createSelector(
+  objectsSelector,
+  objectsFilterSelector,
+  (objects, filter) => objects.filter(
+    object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
+)
@@ -169,6 +169,24 @@ select.form-control {
  }
}

+.ig-search-dark {
+  &:before {
+    font-family: @font-family-icon;
+    font-weight: 900;
+    content: '\f002';
+    font-size: 15px;
+    position: absolute;
+    left: 2px;
+    top: 8px;
+    color: rgba(0, 0, 0, 0.5);
+  }
+
+  .ig-text {
+    padding-left: 25px;
+    .placeholder(rgba(0, 0, 0, 0.5))
+  }
+}
+
.ig-search {
  &:before {
    font-family: @font-family-icon;
@@ -270,4 +288,4 @@ select.form-control {
  .set-expire-decrease {
    bottom: -27px;
    .rotate(-180deg);
  }
}
File diff suppressed because one or more lines are too long
@@ -45,88 +45,63 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
    "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
    minio_pid=$!
    sleep 10

    echo "$minio_pid"
}

function start_minio_erasure()
{
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
    minio_pid=$!
    sleep 15

    echo "$minio_pid"
}

function start_minio_erasure_sets()
{
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
    minio_pid=$!
    sleep 15

    echo "$minio_pid"
}

function start_minio_zone_erasure_sets()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY

    "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
    minio_pids[0]=$!

    "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
    minio_pids[1]=$!

    sleep 40
    echo "${minio_pids[@]}"
}

function start_minio_zone_erasure_sets_ipv6()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY

    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
    minio_pids[0]=$!

    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
    minio_pids[1]=$!

    sleep 40
    echo "${minio_pids[@]}"
}

function start_minio_dist_erasure()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY
    "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
    minio_pids[0]=$!
    "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
    minio_pids[1]=$!
    "${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
    minio_pids[2]=$!
    "${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
    minio_pids[3]=$!

    sleep 40
    echo "${minio_pids[@]}"
}

function run_test_fs()
{
-   minio_pid="$(start_minio_fs)"
+   start_minio_fs

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

-   kill "$minio_pid"
+   pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -138,12 +113,12 @@ function run_test_fs()
}

function run_test_erasure_sets() {
-   minio_pid="$(start_minio_erasure_sets)"
+   start_minio_erasure_sets

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

-   kill "$minio_pid"
+   pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -156,14 +131,12 @@ function run_test_erasure_sets() {

function run_test_zone_erasure_sets()
{
-   minio_pids=( $(start_minio_zone_erasure_sets) )
+   start_minio_zone_erasure_sets

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

-   for pid in "${minio_pids[@]}"; do
-       kill "$pid"
-   done
+   pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -182,16 +155,14 @@ function run_test_zone_erasure_sets()

function run_test_zone_erasure_sets_ipv6()
{
-   minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
+   start_minio_zone_erasure_sets_ipv6

    export SERVER_ENDPOINT="[::1]:9000"

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

-   for pid in "${minio_pids[@]}"; do
-       kill "$pid"
-   done
+   pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -210,12 +181,12 @@ function run_test_zone_erasure_sets_ipv6()

function run_test_erasure()
{
-   minio_pid="$(start_minio_erasure)"
+   start_minio_erasure

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

-   kill "$minio_pid"
+   pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -228,14 +199,12 @@ function run_test_erasure()

function run_test_dist_erasure()
{
-   minio_pids=( $(start_minio_dist_erasure) )
+   start_minio_dist_erasure

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

-   for pid in "${minio_pids[@]}"; do
-       kill "$pid"
-   done
+   pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -26,7 +26,6 @@ import (
	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/logger"
-	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/env"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
@@ -122,8 +121,8 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
	writeSuccessResponseJSON(w, configData)
}

-// SetBucketTargetHandler - sets a remote target for bucket
-func (a adminAPIHandlers) SetBucketTargetHandler(w http.ResponseWriter, r *http.Request) {
+// SetRemoteTargetHandler - sets a remote target for bucket
+func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetBucketTarget")

	defer logger.AuditLog(w, r, "SetBucketTarget", mustGetClaimsFromToken(r))
@@ -174,6 +173,7 @@ func (a adminAPIHandlers) SetBucketTargetHandler(w http.ResponseWriter, r *http.Request) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
		return
	}
+	target.SourceBucket = bucket
	target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
	if target.Arn == "" {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
@@ -183,7 +183,7 @@ func (a adminAPIHandlers) SetBucketTargetHandler(w http.ResponseWriter, r *http.Request) {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
-	targets, err := globalBucketTargetSys.ListTargets(ctx, bucket)
+	targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
@@ -209,9 +209,9 @@ func (a adminAPIHandlers) SetBucketTargetHandler(w http.ResponseWriter, r *http.Request) {
	writeSuccessResponseJSON(w, data)
}

-// ListBucketTargetsHandler - lists remote target(s) for a bucket or gets a target
+// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
-func (a adminAPIHandlers) ListBucketTargetsHandler(w http.ResponseWriter, r *http.Request) {
+func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBucketTargets")

	defer logger.AuditLog(w, r, "ListBucketTargets", mustGetClaimsFromToken(r))
@@ -225,28 +225,13 @@ func (a adminAPIHandlers) ListBucketTargetsHandler(w http.ResponseWriter, r *http.Request) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

-	cfg, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
-	if err != nil {
-		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-		return
-	}
-	var (
-		targets []madmin.BucketTarget
-		tgt, ct madmin.BucketTarget
-		creds   auth.Credentials
-	)
-	if cfg != nil && !cfg.Empty() {
-		for idx, t := range cfg.Targets {
-			if string(t.Type) == arnType || arnType == "" {
-				ct = cfg.Targets[idx]
-				// remove secretKey from creds
-				creds.AccessKey = ct.Credentials.AccessKey
-				tgt = madmin.BucketTarget{Endpoint: ct.Endpoint, Secure: ct.Secure, TargetBucket: ct.TargetBucket, Credentials: &creds, Arn: ct.Arn, Type: ct.Type}
-				targets = append(targets, tgt)
-			}
+	if bucket != "" {
+		if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
		}
	}
+	targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
	data, err := json.Marshal(targets)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -256,8 +241,8 @@ func (a adminAPIHandlers) ListBucketTargetsHandler(w http.ResponseWriter, r *http.Request) {
	writeSuccessResponseJSON(w, data)
}

-// RemoveBucketTargetHandler - removes a remote target for bucket with specified ARN
-func (a adminAPIHandlers) RemoveBucketTargetHandler(w http.ResponseWriter, r *http.Request) {
+// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
+func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "RemoveBucketTarget")

	defer logger.AuditLog(w, r, "RemoveBucketTarget", mustGetClaimsFromToken(r))
@@ -275,11 +260,18 @@ func (a adminAPIHandlers) RemoveBucketTargetHandler(w http.ResponseWriter, r *http.Request) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

+	// Check if bucket exists.
+	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}
+
	if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
-	targets, err := globalBucketTargetSys.ListTargets(ctx, bucket)
+	targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
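One detail worth noting in the old `ListBucketTargetsHandler` body above: it rebuilt each `madmin.BucketTarget` with a credentials value holding only the access key, so secret keys never reached the JSON response. A standalone sketch of just that redaction step, using trimmed stand-in types rather than the real `madmin` definitions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-ins for the madmin types the handler marshals; the real
// structs live in github.com/minio/minio/pkg/madmin.
type credentials struct {
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey,omitempty"`
}

type bucketTarget struct {
	Endpoint     string       `json:"endpoint"`
	TargetBucket string       `json:"targetBucket"`
	Credentials  *credentials `json:"credentials"`
	Arn          string       `json:"arn"`
}

// redact returns copies of the targets with secret keys dropped, mirroring
// the "remove secretKey from creds" step the old handler performed inline.
func redact(targets []bucketTarget) []bucketTarget {
	out := make([]bucketTarget, 0, len(targets))
	for _, t := range targets {
		t.Credentials = &credentials{AccessKey: t.Credentials.AccessKey}
		out = append(out, t)
	}
	return out
}

func main() {
	targets := []bucketTarget{{
		Endpoint:     "replica.example.com:9000", // hypothetical endpoint
		TargetBucket: "backup",
		Credentials:  &credentials{AccessKey: "AKIA...", SecretKey: "s3cr3t"},
		Arn:          "arn:minio:replication::cafe:backup",
	}}
	data, _ := json.Marshal(redact(targets))
	fmt.Println(string(data)) // secretKey is omitted from the output
}
```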
@@ -136,7 +136,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

-	if err = validateConfig(cfg); err != nil {
+	if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -271,7 +271,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

-	if err = validateConfig(cfg); err != nil {
+	if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -383,7 +383,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

-	if err = validateConfig(cfg); err != nil {
+	if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -799,6 +799,59 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	keepConnLive(w, r, respCh)
}

+func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
+	var bgHealStates []madmin.BgHealState
+
+	localHealState, ok := getLocalBackgroundHealStatus()
+	if !ok {
+		return madmin.BgHealState{}, errServerNotInitialized
+	}
+
+	// Get local heal status first
+	bgHealStates = append(bgHealStates, localHealState)
+
+	if globalIsDistErasure {
+		// Get heal status from other peers
+		peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
+		var errCount int
+		for _, nerr := range nerrs {
+			if nerr.Err != nil {
+				logger.LogIf(ctx, nerr.Err)
+				errCount++
+			}
+		}
+		if errCount == len(nerrs) {
+			return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
+		}
+		bgHealStates = append(bgHealStates, peersHealStates...)
+	}
+
+	// Aggregate healing result
+	var aggregatedHealStateResult = madmin.BgHealState{
+		ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
+		LastHealActivity:  bgHealStates[0].LastHealActivity,
+		NextHealRound:     bgHealStates[0].NextHealRound,
+		HealDisks:         bgHealStates[0].HealDisks,
+	}
+
+	bgHealStates = bgHealStates[1:]
+
+	for _, state := range bgHealStates {
+		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
+		aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
+		if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
+			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
+			// The node which has the last heal activity means its
+			// is the node that is orchestrating self healing operations,
+			// which also means it is the same node which decides when
+			// the next self healing operation will be done.
+			aggregatedHealStateResult.NextHealRound = state.NextHealRound
+		}
+	}
+
+	return aggregatedHealStateResult, nil
+}
+
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HealBackgroundStatus")

@@ -815,39 +868,13 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

-	var bgHealStates []madmin.BgHealState
-
-	// Get local heal status first
-	bgHealStates = append(bgHealStates, getLocalBackgroundHealStatus())
-
-	if globalIsDistErasure {
-		// Get heal status from other peers
-		peersHealStates := globalNotificationSys.BackgroundHealStatus()
-		bgHealStates = append(bgHealStates, peersHealStates...)
+	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
	}

-	// Aggregate healing result
-	var aggregatedHealStateResult = madmin.BgHealState{
-		ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
-		LastHealActivity:  bgHealStates[0].LastHealActivity,
-		NextHealRound:     bgHealStates[0].NextHealRound,
-	}
-
-	bgHealStates = bgHealStates[1:]
-
-	for _, state := range bgHealStates {
-		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
-		if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
-			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
-			// The node which has the last heal activity means its
-			// is the node that is orchestrating self healing operations,
-			// which also means it is the same node which decides when
-			// the next self healing operation will be done.
-			aggregatedHealStateResult.NextHealRound = state.NextHealRound
-		}
-	}
-
-	if err := json.NewEncoder(w).Encode(aggregatedHealStateResult); err != nil {
+	if err := json.NewEncoder(w).Encode(aggregateHealStateResult); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
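The merge rule the new `getAggregatedBackgroundHealState` helper applies is simple: scanned-item counts add up, `HealDisks` slices concatenate, and whichever peer reported the most recent `LastHealActivity` also supplies `NextHealRound`. A self-contained sketch of just that merge, with a trimmed stand-in for `madmin.BgHealState` rather than the real type:

```go
package main

import (
	"fmt"
	"time"
)

// bgHealState is a trimmed stand-in for madmin.BgHealState, kept only to
// demonstrate the aggregation rule above.
type bgHealState struct {
	ScannedItemsCount int64
	LastHealActivity  time.Time
	NextHealRound     time.Time
	HealDisks         []string
}

// aggregate folds per-node states into one cluster-wide view: counts sum,
// disk lists concatenate, and the node with the latest heal activity also
// dictates when the next heal round happens.
func aggregate(states []bgHealState) bgHealState {
	out := states[0]
	for _, s := range states[1:] {
		out.ScannedItemsCount += s.ScannedItemsCount
		out.HealDisks = append(out.HealDisks, s.HealDisks...)
		if !s.LastHealActivity.IsZero() && out.LastHealActivity.Before(s.LastHealActivity) {
			out.LastHealActivity = s.LastHealActivity
			out.NextHealRound = s.NextHealRound
		}
	}
	return out
}

func main() {
	now := time.Now()
	local := bgHealState{ScannedItemsCount: 10, LastHealActivity: now.Add(-time.Hour), HealDisks: []string{"disk1"}}
	peer := bgHealState{ScannedItemsCount: 5, LastHealActivity: now, NextHealRound: now.Add(time.Hour), HealDisks: []string{"disk2"}}
	fmt.Printf("%+v\n", aggregate([]bgHealState{local, peer}))
}
```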
@@ -1063,7 +1090,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
// Avoid reusing tcp connection if read timeout is hit
// This is needed to make r.Context().Done() work as
// expected in case of read timeout
w.Header().Add("Connection", "close")
w.Header().Set("Connection", "close")

setEventStreamHeaders(w)

@@ -1214,7 +1241,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
return
}

vars := mux.Vars(r)
query := r.URL.Query()
obdInfo := madmin.OBDInfo{}
obdInfoCh := make(chan madmin.OBDInfo)

@@ -1248,10 +1275,9 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
}

deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)

defer cancel()

nsLock := objectAPI.NewNSLock(deadlinedCtx, minioMetaBucket, "obd-in-progress")
nsLock := objectAPI.NewNSLock(ctx, minioMetaBucket, "obd-in-progress")
if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
errResp(err)
return
@@ -1261,7 +1287,13 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
go func() {
defer close(obdInfoCh)

if cpu, ok := vars["syscpu"]; ok && cpu == "true" {
if log := query.Get("log"); log == "true" {
obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, getLocalLogOBD(deadlinedCtx, r))
obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, globalNotificationSys.LogOBDInfo(deadlinedCtx)...)
partialWrite(obdInfo)
}

if cpu := query.Get("syscpu"); cpu == "true" {
cpuInfo := getLocalCPUOBDInfo(deadlinedCtx, r)

obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
@@ -1269,7 +1301,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
partialWrite(obdInfo)
}

if diskHw, ok := vars["sysdiskhw"]; ok && diskHw == "true" {
if diskHw := query.Get("sysdiskhw"); diskHw == "true" {
diskHwInfo := getLocalDiskHwOBD(deadlinedCtx, r)

obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
@@ -1277,7 +1309,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
partialWrite(obdInfo)
}

if osInfo, ok := vars["sysosinfo"]; ok && osInfo == "true" {
if osInfo := query.Get("sysosinfo"); osInfo == "true" {
osInfo := getLocalOsInfoOBD(deadlinedCtx, r)

obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
@@ -1285,7 +1317,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
partialWrite(obdInfo)
}

if mem, ok := vars["sysmem"]; ok && mem == "true" {
if mem := query.Get("sysmem"); mem == "true" {
memInfo := getLocalMemOBD(deadlinedCtx, r)

obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
@@ -1293,7 +1325,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
partialWrite(obdInfo)
}

if proc, ok := vars["sysprocess"]; ok && proc == "true" {
if proc := query.Get("sysprocess"); proc == "true" {
procInfo := getLocalProcOBD(deadlinedCtx, r)

obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
@@ -1301,14 +1333,14 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
partialWrite(obdInfo)
}

if config, ok := vars["minioconfig"]; ok && config == "true" {
if config := query.Get("minioconfig"); config == "true" {
cfg, err := readServerConfig(ctx, objectAPI)
logger.LogIf(ctx, err)
obdInfo.Minio.Config = cfg
partialWrite(obdInfo)
}

if drive, ok := vars["perfdrive"]; ok && drive == "true" {
if drive := query.Get("perfdrive"); drive == "true" {
// Get drive obd details from local server's drive(s)
driveOBDSerial := getLocalDrivesOBD(deadlinedCtx, false, globalEndpoints, r)
driveOBDParallel := getLocalDrivesOBD(deadlinedCtx, true, globalEndpoints, r)
@@ -1339,7 +1371,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
partialWrite(obdInfo)
}

if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure {
if net := query.Get("perfnet"); net == "true" && globalIsDistErasure {
obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
partialWrite(obdInfo)

@@ -1353,6 +1385,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
obdInfo.Perf.NetParallel = globalNotificationSys.NetOBDParallelInfo(deadlinedCtx)
partialWrite(obdInfo)
}

}()

ticker := time.NewTicker(30 * time.Second)
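Note: the hunks above migrate OBDInfoHandler from gorilla/mux route variables to plain r.URL.Query() lookups, which is what lets the route registration further below drop its long Queries(...) matcher list. A minimal sketch of the query-parameter style, using a hypothetical handler rather than the real one:

package main

import (
	"fmt"
	"net/http"
)

// Before: each flag had to be a registered route variable, so every
// combination needed a Queries("syscpu", "{syscpu:true|false}", ...) matcher.
// After: the handler inspects r.URL.Query() itself, so any subset of
// flags can be passed, e.g. GET /obdinfo?syscpu=true&sysmem=true.
func obdInfoHandler(w http.ResponseWriter, r *http.Request) {
	query := r.URL.Query()
	for _, flag := range []string{"syscpu", "sysmem", "sysdiskhw"} {
		if query.Get(flag) == "true" {
			fmt.Fprintf(w, "collecting %s\n", flag)
		}
	}
}

func main() {
	http.HandleFunc("/obdinfo", obdInfoHandler)
	// Demo server only; a real deployment would handle the returned error.
	http.ListenAndServe(":8080", nil)
}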
@@ -1468,17 +1501,34 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
Notifications: notifyTarget,
}

// Collect any disk healing.
healing, _ := getAggregatedBackgroundHealState(ctx)
healDisks := make(map[string]struct{}, len(healing.HealDisks))
for _, disk := range healing.HealDisks {
healDisks[disk] = struct{}{}
}

// find all disks which belong to each respective endpoints
for i := range servers {
for _, disk := range storageInfo.Disks {
if strings.Contains(disk.Endpoint, servers[i].Endpoint) {
if _, ok := healDisks[disk.Endpoint]; ok {
disk.Healing = true
}
servers[i].Disks = append(servers[i].Disks, disk)
}
}
}

// add all the disks local to this server.
for _, disk := range storageInfo.Disks {
if disk.DrivePath == "" && disk.Endpoint == "" {
continue
}
if disk.Endpoint == disk.DrivePath {
if _, ok := healDisks[disk.Endpoint]; ok {
disk.Healing = true
}
servers[len(servers)-1].Disks = append(servers[len(servers)-1].Disks, disk)
}
}
@@ -1514,7 +1564,7 @@ func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
// Fetch the configured targets
tr := NewGatewayHTTPTransport()
defer tr.CloseIdleConnections()
targetList, err := notify.FetchRegisteredTargets(cfg, GlobalContext.Done(), tr, true, false)
targetList, err := notify.FetchRegisteredTargets(GlobalContext, cfg, tr, true, false)
if err != nil && err != notify.ErrTargetsOffline {
logger.LogIf(GlobalContext, err)
return nil
@@ -1558,12 +1608,12 @@ func fetchVaultStatus(cfg config.Config) madmin.Vault {
keyID := GlobalKMS.DefaultKeyID()
kmsInfo := GlobalKMS.Info()

if kmsInfo.Endpoint == "" {
if len(kmsInfo.Endpoints) == 0 {
vault.Status = "KMS configured using master key"
return vault
}

if err := checkConnection(kmsInfo.Endpoint, 15*time.Second); err != nil {
if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
vault.Status = "offline"
} else {
vault.Status = "online"
@@ -1631,23 +1681,32 @@ func fetchLoggerInfo(cfg config.Config) ([]madmin.Logger, []madmin.Audit) {

// checkConnection - ping an endpoint, return err in case of no connection
func checkConnection(endpointStr string, timeout time.Duration) error {
tr := newCustomHTTPTransport(&tls.Config{RootCAs: globalRootCAs}, timeout)()
defer tr.CloseIdleConnections()

ctx, cancel := context.WithTimeout(GlobalContext, timeout)
defer cancel()

req, err := http.NewRequest(http.MethodHead, endpointStr, nil)
client := &http.Client{Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: xhttp.NewCustomDialContext(timeout),
ResponseHeaderTimeout: 5 * time.Second,
TLSHandshakeTimeout: 5 * time.Second,
ExpectContinueTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{RootCAs: globalRootCAs},
// Go net/http automatically unzips if content-type is
// gzip; disable this feature, as we are always interested
// in the raw stream.
DisableCompression: true,
}}
defer client.CloseIdleConnections()

req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
if err != nil {
return err
}

client := &http.Client{Transport: tr}
resp, err := client.Do(req.WithContext(ctx))
resp, err := client.Do(req)
if err != nil {
return err
}
defer xhttp.DrainBody(resp.Body)
resp.Body.Close()
return nil
}
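Note: the rewritten checkConnection is an instance of a common probe pattern: a HEAD request bounded by a context deadline, sent over a transport with compression disabled. A self-contained sketch of that pattern using only the standard library; this is not MinIO's helper itself:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// probe issues a HEAD request bounded by a context deadline, with
// compression disabled so the (discarded) body is never inflated.
func probe(endpoint string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	client := &http.Client{Transport: &http.Transport{
		Proxy:              http.ProxyFromEnvironment,
		DisableCompression: true,
	}}
	defer client.CloseIdleConnections()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpoint, nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	fmt.Println(probe("https://example.com", 5*time.Second))
}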
@@ -88,13 +88,15 @@ type allHealState struct {
sync.Mutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence
healLocalDisks map[Endpoint]struct{}
}

// newHealState - initialize global heal state management
func newHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
}

go healState.periodicHealSeqsClean(GlobalContext)
@@ -102,6 +104,42 @@ func newHealState() *allHealState {
return healState
}

func (ahs *allHealState) healDriveCount() int {
ahs.Lock()
defer ahs.Unlock()

return len(ahs.healLocalDisks)
}

func (ahs *allHealState) getHealLocalDisks() Endpoints {
ahs.Lock()
defer ahs.Unlock()

var healLocalDisks Endpoints
for ep := range ahs.healLocalDisks {
healLocalDisks = append(healLocalDisks, ep)
}
return healLocalDisks
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}
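Note: healLocalDisks is, in effect, a mutex-guarded set keyed by endpoint. A reduced model of the push/pop/count bookkeeping with local stand-in types (MinIO's Endpoint/Endpoints are richer than the string used here):

package main

import (
	"fmt"
	"sync"
)

type endpoint string

// healDiskSet is a mutex-guarded set of endpoints awaiting heal.
type healDiskSet struct {
	sync.Mutex
	disks map[endpoint]struct{}
}

func (s *healDiskSet) push(eps ...endpoint) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		s.disks[ep] = struct{}{}
	}
}

func (s *healDiskSet) pop(eps ...endpoint) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		delete(s.disks, ep)
	}
}

func (s *healDiskSet) count() int {
	s.Lock()
	defer s.Unlock()
	return len(s.disks)
}

func main() {
	s := &healDiskSet{disks: map[endpoint]struct{}{}}
	s.push("http://node1/disk1", "http://node2/disk3")
	s.pop("http://node1/disk1") // healed successfully, drop it
	fmt.Println(s.count())      // 1
}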
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
@@ -485,9 +523,12 @@ func (h *healSequence) isQuitting() bool {
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
h.mutex.RUnlock()
return ended
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
}

// stops the heal sequence - safe to call multiple times.
@@ -627,6 +668,12 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if source.opts != nil {
task.opts = *source.opts
}

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)

select {
@@ -634,9 +681,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if !h.reportProgress {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and
// return success.
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
// Return the error so that caller can handle it.
return res.err
}

h.mutex.Lock()
@@ -700,14 +749,13 @@ func (h *healSequence) healItemsFromSourceCh() error {
if err := h.queueHealTask(source, itemType); err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}

h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
@@ -773,17 +821,17 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return errHealStopSignalled
}

herr := h.queueHealTask(healSource{
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if isErrObjectNotFound(herr) || isErrVersionNotFound(herr) {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return herr
return err
})
}
}
@@ -844,7 +892,9 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
}

if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
return err
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

if bucketsOnly {
@@ -855,9 +905,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
oi, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, ""); err != nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -867,7 +920,11 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
}

if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}
@@ -884,9 +941,10 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
return errHealStopSignalled
}

return h.queueHealTask(healSource{
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
}

@@ -183,14 +183,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
}
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-bucket-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListBucketTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetBucketTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetBucketTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveBucketTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-bucket-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveBucketTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}

// -- Top APIs --
@@ -212,18 +212,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler))
}
}

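Note: with the Queries matcher list gone, a client may request any subset of checks. A hedged client-side sketch; the real /minio/admin/v3/obdinfo endpoint requires signed admin credentials, which are omitted here, so this unauthenticated call is illustrative only:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical unauthenticated call for illustration; a real request
	// to the admin API must carry MinIO admin request signing.
	resp, err := http.Get("http://localhost:9000/minio/admin/v3/obdinfo?syscpu=true&sysmem=true")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}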
@@ -29,7 +29,7 @@ import (

minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
@@ -107,13 +107,16 @@ const (
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrReplicationDestinationNotFoundError
ErrReplicationDestinationMissingLock
ErrReplicationTargetNotFoundError
ErrReplicationRemoteConnectionError
ErrBucketRemoteIdenticalToSource
ErrBucketRemoteAlreadyExists
ErrBucketRemoteArnTypeInvalid
ErrBucketRemoteArnInvalid
ErrBucketRemoteRemoveDisallowed
ErrReplicationTargetNotVersionedError
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrBucketReplicationDisabledError
@@ -661,20 +664,9 @@ var errorCodes = errorCodeMap{
Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialDate: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialRegion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; the region is wrong;",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRegion: {
@@ -682,9 +674,6 @@ var errorCodes = errorCodeMap{
Description: "Region does not match.",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
@@ -695,9 +684,6 @@ var errorCodes = errorCodeMap{
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
// Need changes to make sure variable messages can be constructed.
ErrInvalidRequestVersion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",
@@ -768,8 +754,6 @@ var errorCodes = errorCodeMap{
Description: "Your key is too long",
HTTPStatusCode: http.StatusBadRequest,
},

// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
ErrUnsignedHeaders: {
Code: "AccessDenied",
Description: "There were headers present in the request which were not signed",
@@ -830,11 +814,21 @@ var errorCodes = errorCodeMap{
Description: "The replication destination bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationMissingLock: {
Code: "ReplicationDestinationMissingLockError",
Description: "The replication destination bucket does not have object locking enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationTargetNotFoundError: {
Code: "XminioAdminReplicationTargetNotFoundError",
Description: "The replication target does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationRemoteConnectionError: {
Code: "XminioAdminReplicationRemoteConnectionError",
Description: "Remote service endpoint or target bucket not available",
HTTPStatusCode: http.StatusNotFound,
},
ErrBucketRemoteIdenticalToSource: {
Code: "XminioAdminRemoteIdenticalToSource",
Description: "The remote target cannot be identical to source",
@@ -865,6 +859,11 @@ var errorCodes = errorCodeMap{
Description: "The replication target does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationSourceNotVersionedError: {
Code: "ReplicationSourceNotVersionedError",
Description: "The replication source does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationNeedsVersioningError: {
Code: "InvalidRequest",
Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration",
@@ -972,7 +971,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrMetadataTooLarge: {
Code: "InvalidArgument",
Code: "MetadataTooLarge",
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -1909,8 +1908,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrReplicationConfigurationNotFoundError
case BucketReplicationDestinationNotFound:
apiErr = ErrReplicationDestinationNotFoundError
case BucketReplicationDestinationMissingLock:
apiErr = ErrReplicationDestinationMissingLock
case BucketRemoteTargetNotFound:
apiErr = ErrReplicationTargetNotFoundError
case BucketRemoteConnectionErr:
apiErr = ErrReplicationRemoteConnectionError
case BucketRemoteAlreadyExists:
apiErr = ErrBucketRemoteAlreadyExists
case BucketRemoteArnTypeInvalid:
@@ -1921,6 +1924,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrBucketRemoteRemoveDisallowed
case BucketReplicationTargetNotVersioned:
apiErr = ErrReplicationTargetNotVersionedError
case BucketReplicationSourceNotVersioned:
apiErr = ErrReplicationSourceNotVersionedError
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:

@@ -128,6 +128,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// values to client.
continue
}

// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) {

@@ -20,6 +20,7 @@ import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"path"
@@ -35,7 +36,7 @@ import (
const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
maxObjectList = 10000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
@@ -407,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var listbuckets []Bucket
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}
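Note: this hunk and several below replace `var xs []T` with `make([]T, 0, n)` so that append works into preallocated capacity instead of reallocating as the slice grows. A tiny self-contained illustration of the idiom:

package main

import "fmt"

type bucketInfo struct{ Name string }

type bucket struct{ Name string }

func toBuckets(infos []bucketInfo) []bucket {
	// Capacity is known in advance, so a single allocation suffices;
	// the appends below never trigger a grow-and-copy cycle.
	out := make([]bucket, 0, len(infos))
	for _, bi := range infos {
		out = append(out, bucket{Name: bi.Name})
	}
	return out
}

func main() {
	bs := toBuckets([]bucketInfo{{"alpha"}, {"beta"}})
	fmt.Println(len(bs), cap(bs)) // 2 2
}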
@@ -427,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var data = ListVersionsResponse{}

@@ -472,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
data.VersionIDMarker = versionIDMarker
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -483,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var data = ListObjectsResponse{}

@@ -516,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys

data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -530,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}

@@ -564,6 +564,10 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
}
}
@@ -580,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated

commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -759,6 +765,10 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
case "AccessDenied":
// The request is from browser and also if browser
// is enabled we need to redirect.
@@ -817,38 +827,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {

switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// The request is from browser and also if browser
// is enabled we need to redirect.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}

reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}

encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}

@@ -17,6 +17,7 @@
package cmd

import (
"net"
"net/http"

"github.com/gorilla/mux"
@@ -60,31 +61,50 @@ func newCachedObjectLayerFn() CacheObjectLayer {
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}

// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}

// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}

// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()

var routers []*mux.Router
for _, domainName := range globalDomainNames {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, _ := net.SplitHostPort(getHost(r))
// Make sure to skip matching minio.<domain>; this is
// specifically meant for operator/k8s deployment
// The reason we need to skip this is for a special
// usecase where we need to make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by the bucketDNS style to ensure that path style
// is available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// makes sure that buckets are routed through this matcher
// to match for `<bucket>`
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

@@ -94,7 +114,10 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
@@ -138,7 +161,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")

@@ -346,8 +346,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is the same as s3:ListBucketVersions permission
// verify as a fallback.
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -360,6 +378,22 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is the same as s3:ListBucketVersions permission
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(policy.ListBucketAction),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
}
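Note: the fallback added in both branches mirrors AWS semantics, where a principal granted s3:ListBucket implicitly satisfies s3:ListBucketVersions. The skeleton of that check, reduced to local stand-in types:

package main

import "fmt"

type action string

const (
	listBucket         action = "s3:ListBucket"
	listBucketVersions action = "s3:ListBucketVersions"
)

// isAllowed consults a grant set; checkAccess retries ListBucketVersions
// as ListBucket, matching the AWS equivalence the hunks implement.
func isAllowed(grants map[action]bool, a action) bool { return grants[a] }

func checkAccess(grants map[action]bool, a action) bool {
	if isAllowed(grants, a) {
		return true
	}
	if a == listBucketVersions {
		return isAllowed(grants, listBucket) // fallback
	}
	return false
}

func main() {
	grants := map[action]bool{listBucket: true}
	fmt.Println(checkAccess(grants, listBucketVersions)) // true
}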
@@ -21,7 +21,6 @@ import (
"path"
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)

@@ -55,19 +54,20 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}

func waitForLowHTTPReq(tolerance int32) {
func waitForLowHTTPReq(tolerance int32, maxWait time.Duration) {
const wait = 10 * time.Millisecond
waitCount := maxWait / wait

// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
tolerance += int32(globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers())

if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at most 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// Any requests in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
time.Sleep(wait)
}
}
}
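Note: the rewrite turns a fixed 600 iterations of 1s sleeps into maxWait / 10ms polling iterations, so the caller controls the ceiling (the heal loop below passes time.Second). A standalone sketch of the same shape, with an atomic counter standing in for the HTTP server's request count:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var inflight int64 // stand-in for httpServer.GetRequestCount()

// waitForLowLoad polls until in-flight requests drop below tolerance,
// giving up after maxWait. With wait = 10ms and maxWait = 1s this is
// at most 100 iterations, matching the hunk's arithmetic.
func waitForLowLoad(tolerance int64, maxWait time.Duration) {
	const wait = 10 * time.Millisecond
	for waitCount := maxWait / wait; waitCount > 0 && atomic.LoadInt64(&inflight) >= tolerance; waitCount-- {
		time.Sleep(wait)
	}
}

func main() {
	atomic.StoreInt64(&inflight, 5)
	start := time.Now()
	waitForLowLoad(1, 100*time.Millisecond) // load never drops, so waits ~100ms
	fmt.Println("waited", time.Since(start).Round(10*time.Millisecond))
}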
@@ -82,7 +82,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)

var res madmin.HealResultItem
var err error
@@ -100,6 +100,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
ObjectPathUpdated(path.Join(task.bucket, task.object))
}
task.responseCh <- healResult{result: res, err: err}

case <-h.doneCh:
return
case <-ctx.Done():
@@ -116,24 +117,6 @@ func newHealRoutine() *healRoutine {

}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)

nh := newBgHealSequence()
// Heal any disk format and metadata early, if possible.
if err := nh.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(nh.ctx, err)
}
}

globalBackgroundHealState.LaunchNewHealSequence(nh)
}

// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
@@ -145,24 +128,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
return madmin.HealResultItem{}, err
}

// Healing succeeded notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
// Notify servers in background and retry if needed.
go func() {
retry:
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
if nerr.Err.Error() == errServerNotInitialized.Error() {
time.Sleep(time.Second)
goto retry
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}()
}

return res, nil
}

@@ -18,27 +18,24 @@ package cmd

import (
"context"
"errors"
"fmt"
"time"

"github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
)

const defaultMonitorNewDiskInterval = time.Minute * 3
const defaultMonitorNewDiskInterval = time.Second * 10

func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureZones)
if !ok {
return
}

initBackgroundHealing(ctx, objAPI) // start quick background healing

var bgSeq *healSequence
var found bool

@@ -50,82 +47,119 @@ func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
time.Sleep(time.Second)
}

for _, ep := range getLocalDisksToHeal() {
globalBackgroundHealState.pushHealLocalDisks(ep)
}

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
if err := bgSeq.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
}

go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
}
}
}
return disksToHeal

}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)

globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence())
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healSequence) {
// Perform automatic disk healing when a disk is replaced locally.
for {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)

var erasureSetInZoneEndpointToHeal []map[int]Endpoints
healDisks := globalBackgroundHealState.getHealLocalDisks()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
len(healDisks)))

erasureSetInZoneEndpointToHeal = make([]map[int]Endpoints, len(z.zones))
for i := range z.zones {
erasureSetInZoneEndpointToHeal[i] = map[int]Endpoints{}
}
if len(localDisksToHeal) == 0 {
}

// heal only if new disks found.
for _, endpoint := range healDisks {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}

// Reformat disks only if needed.
if !healNewDisks {
continue
}

logger.Info("New unformatted drives detected, attempting to heal...")
for i, disks := range localDisksInZoneHeal {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
zoneIdx := globalEndpoints.GetLocalZoneIdx(endpoint)
if zoneIdx < 0 {
continue
}
}

// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}

var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
printEndpointError(endpoint, err, false)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[zoneIdx].format, format)
if err != nil {
printEndpointError(endpoint, err, false)
continue
}
erasureSetInZoneToHeal[i] = erasureSetToHeal

erasureSetInZoneEndpointToHeal[zoneIdx][setIndex] = append(erasureSetInZoneEndpointToHeal[zoneIdx][setIndex], endpoint)
}

// Heal all erasure sets that need
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
for i, setMap := range erasureSetInZoneEndpointToHeal {
for setIndex, endpoints := range setMap {
for _, ep := range endpoints {
logger.Info("Healing disk '%s' on %s zone", ep, humanize.Ordinal(i+1))

if err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].setDriveCount); err != nil {
logger.LogIf(ctx, err)
continue
}

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(ep)
}
}
}

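Note: underneath the interleaved old/new bodies, the new bookkeeping is one map[int]Endpoints per zone, grouping replaced-disk endpoints by the erasure set they belong to so each set is healed once with all its new disks known. A reduced sketch with local types:

package main

import "fmt"

type endpoint struct {
	zone, set int
	url       string
}

func main() {
	const zones = 2
	// erasureSetInZoneEndpointToHeal-style bookkeeping: per zone,
	// a map from erasure set index to the endpoints needing heal.
	toHeal := make([]map[int][]string, zones)
	for i := range toHeal {
		toHeal[i] = map[int][]string{}
	}

	disks := []endpoint{
		{0, 3, "http://node1/disk7"},
		{0, 3, "http://node2/disk7"},
		{1, 0, "http://node3/disk1"},
	}
	for _, d := range disks {
		toHeal[d.zone][d.set] = append(toHeal[d.zone][d.set], d.url)
	}

	for zone, sets := range toHeal {
		for set, eps := range sets {
			fmt.Printf("zone %d, set %d: heal %v once\n", zone, set, eps)
		}
	}
}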
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
@@ -80,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize = bitrotSumsTotalSize + length
|
||||
}
|
||||
err := disk.CreateFile(volume, filePath, totalFileSize, r)
|
||||
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
|
||||
r.CloseWithError(err)
|
||||
close(bw.canClose)
|
||||
}()
|
||||
@@ -118,7 +119,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
// For the first ReadAt() call we need to open the stream for reading.
|
||||
b.currOffset = offset
|
||||
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
|
||||
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -139,8 +140,8 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
|
||||
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
|
||||
err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
|
||||
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
|
||||
logger.LogIf(GlobalContext, err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
@@ -33,14 +35,14 @@ type wholeBitrotWriter struct {
|
||||
}
|
||||
|
||||
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
|
||||
err := b.disk.AppendFile(b.volume, b.filePath, p)
|
||||
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
|
||||
return 0, err
|
||||
}
|
||||
_, err = b.Hash.Write(p)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
@@ -68,15 +70,13 @@ type wholeBitrotReader struct {
|
||||
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
|
||||
if b.buf == nil {
|
||||
b.buf = make([]byte, b.tillOffset-offset)
|
||||
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
|
||||
ctx := GlobalContext
|
||||
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
|
||||
logger.LogIf(ctx, err)
|
||||
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if len(b.buf) < len(buf) {
|
||||
logger.LogIf(GlobalContext, errLessData)
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
|
||||
return 0, errLessData
|
||||
}
|
||||
n = copy(buf, b.buf)
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -34,12 +35,12 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
|
||||
volume := "testvol"
|
||||
filePath := "testfile"
|
||||
|
||||
disk, err := newXLStorage(tmpDir, "")
|
||||
disk, err := newLocalXLStorage(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
disk.MakeVol(volume)
|
||||
disk.MakeVol(context.Background(), volume)
|
||||
|
||||
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
|
||||
|
||||
|
||||
@@ -131,7 +131,7 @@ func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method s
 		values = make(url.Values)
 	}

-	respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
+	respBody, err = client.restClient.Call(ctx, method, values, body, length)
 	if err == nil {
 		return respBody, nil
 	}

@@ -178,17 +178,22 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
 			}
 			onlineServers++
 		}
-		// Sleep for a while - so that we don't go into
-		// 100% CPU when half the endpoints are offline.
-		time.Sleep(500 * time.Millisecond)
-		retries++
-		// after 5 retries start logging that servers are not reachable yet
-		if retries >= 5 {
-			logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
-			logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
-			retries = 0 // reset to log again after 5 retries.
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			// Sleep for a while - so that we don't go into
+			// 100% CPU when half the endpoints are offline.
+			time.Sleep(100 * time.Millisecond)
+			retries++
+			// after 5 retries start logging that servers are not reachable yet
+			if retries >= 5 {
+				logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
+				logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
+				retries = 0 // reset to log again after 5 retries.
+			}
+			offlineEndpoints = nil
 		}
-		offlineEndpoints = nil
 	}
 	return nil
 }
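The reworked wait loop above trades a fixed 500ms sleep for a select that also watches ctx.Done(), so the bootstrap check stops promptly on shutdown. As a rough standalone sketch of the same pattern — waitUntilReady, checkOnce, and the log cadence below are illustrative stand-ins, not code from this changeset:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitUntilReady retries checkOnce until it succeeds or ctx is cancelled.
func waitUntilReady(ctx context.Context, checkOnce func() error) error {
	retries := 0
	for {
		if err := checkOnce(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			// Stop promptly on shutdown instead of sleeping again.
			return ctx.Err()
		default:
			// Back off briefly so the loop does not spin at 100% CPU.
			time.Sleep(100 * time.Millisecond)
			retries++
			if retries >= 5 {
				fmt.Println("still waiting for peers to come online")
				retries = 0 // reset so the reminder repeats periodically
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := waitUntilReady(ctx, func() error { return errors.New("not ready") })
	fmt.Println("result:", err) // context deadline exceeded after ~1s
}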
@@ -228,13 +233,13 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
 		}
 	}

-	trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
+	trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
 	restClient := rest.NewClient(serverURL, trFn, newAuthToken)
 	restClient.HealthCheckFn = func() bool {
 		ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
 		// Instantiate a new rest client for healthcheck
 		// to avoid recursive healthCheckFn()
-		respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).CallWithContext(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
+		respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).Call(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
 		xhttp.DrainBody(respBody)
 		cancel()
 		var ne *rest.NetworkError
@@ -33,7 +33,7 @@ import (
 	"github.com/minio/minio-go/v7/pkg/set"
 	"github.com/minio/minio-go/v7/pkg/tags"
 	"github.com/minio/minio/cmd/config"
-	"github.com/minio/minio/cmd/config/etcd/dns"
+	"github.com/minio/minio/cmd/config/dns"
 	"github.com/minio/minio/cmd/crypto"
 	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"

@@ -72,7 +72,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

 	// Get buckets in the DNS
 	dnsBuckets, err := globalDNSConfig.List()
-	if err != nil && err != dns.ErrNoEntriesFound {
+	if err != nil && err != dns.ErrNoEntriesFound && err != dns.ErrNotImplemented {
 		logger.LogIf(GlobalContext, err)
 		return
 	}

@@ -80,33 +80,35 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 	bucketsSet := set.NewStringSet()
 	bucketsToBeUpdated := set.NewStringSet()
 	bucketsInConflict := set.NewStringSet()
-	for _, bucket := range buckets {
-		bucketsSet.Add(bucket.Name)
-		r, ok := dnsBuckets[bucket.Name]
-		if !ok {
-			bucketsToBeUpdated.Add(bucket.Name)
-			continue
-		}
-		if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
-			if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
-				// No difference in terms of domainIPs and nothing
-				// has changed so we don't change anything on the etcd.
-				continue
-			}
-			// if domain IPs intersect then it won't be an empty set.
-			// such an intersection means that bucket exists on etcd.
-			// but if we do see a difference with local domain IPs with
-			// hostSlice from etcd then we should update with newer
-			// domainIPs, we proceed to do that here.
-			bucketsToBeUpdated.Add(bucket.Name)
-			continue
-		}
-		// No IPs seem to intersect, this means that bucket exists but has
-		// different IP addresses perhaps from a different deployment.
-		// bucket names are globally unique in federation at a given
-		// path prefix, name collision is not allowed. We simply log
-		// an error and continue.
-		bucketsInConflict.Add(bucket.Name)
+	if dnsBuckets != nil {
+		for _, bucket := range buckets {
+			bucketsSet.Add(bucket.Name)
+			r, ok := dnsBuckets[bucket.Name]
+			if !ok {
+				bucketsToBeUpdated.Add(bucket.Name)
+				continue
+			}
+			if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
+				if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
+					// No difference in terms of domainIPs and nothing
+					// has changed so we don't change anything on the etcd.
+					continue
+				}
+				// if domain IPs intersect then it won't be an empty set.
+				// such an intersection means that bucket exists on etcd.
+				// but if we do see a difference with local domain IPs with
+				// hostSlice from etcd then we should update with newer
+				// domainIPs, we proceed to do that here.
+				bucketsToBeUpdated.Add(bucket.Name)
+				continue
+			}
+			// No IPs seem to intersect, this means that bucket exists but has
+			// different IP addresses perhaps from a different deployment.
+			// bucket names are globally unique in federation at a given
+			// path prefix, name collision is not allowed. We simply log
+			// an error and continue.
+			bucketsInConflict.Add(bucket.Name)
+		}
 	}

 	// Add/update buckets that are not registered with the DNS
@@ -449,14 +451,15 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

 	deleteList := toNames(objectsToDelete)
 	dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
-		Versioned: globalBucketVersioningSys.Enabled(bucket),
+		Versioned:        globalBucketVersioningSys.Enabled(bucket),
+		VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
 	})

 	deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
 	for i := range errs {
 		dindex := objectsToDelete[deleteList[i]]
 		apiErr := toAPIError(ctx, errs[i])
-		if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
+		if apiErr.Code == "" || apiErr.Code == "NoSuchKey" || apiErr.Code == "InvalidArgument" {
 			deletedObjects[dindex] = dObjects[i]
 			continue
 		}

@@ -561,7 +564,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	if globalDNSConfig != nil {
 		sr, err := globalDNSConfig.Get(bucket)
 		if err != nil {
-			if err == dns.ErrNoEntriesFound {
+			// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
+			// exists elsewhere
+			if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
 				// Proceed to creating a bucket.
 				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
 					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))

@@ -654,16 +659,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
 		return
 	}
-	if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {
+
+	if !objectAPI.IsEncryptionSupported() && crypto.IsRequested(r.Header) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
 		return
 	}

 	bucket := mux.Vars(r)["bucket"]

-	// To detect if the client has disconnected.
-	r.Body = &contextReader{r.Body, r.Context()}
-
 	// Require Content-Length to be set in the request
 	size := r.ContentLength
 	if size < 0 {

@@ -762,7 +765,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

 	// Make sure formValues adhere to policy restrictions.
 	if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
-		writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrAccessDenied, err), r.URL, guessIsBrowserReq(r))
 		return
 	}

@@ -804,7 +807,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
 		// This request header needs to be set prior to setting ObjectOptions
 		if !crypto.SSEC.IsRequested(r.Header) {
-			r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
+			r.Header.Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
 		}
 	}

@@ -999,7 +1002,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

 	if globalDNSConfig != nil {
 		if err := globalDNSConfig.Delete(bucket); err != nil {
-			logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
+			logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err))
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
@@ -24,12 +24,36 @@ import (
 	"strings"

 	"github.com/gorilla/mux"
 	"github.com/minio/minio/cmd/crypto"
 	"github.com/minio/minio/cmd/logger"

 	"github.com/minio/minio/pkg/bucket/policy"
+	"github.com/minio/minio/pkg/sync/errgroup"
 )

+func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
+	inParallel := func(objects []ObjectInfo) {
+		g := errgroup.WithNErrs(len(objects))
+		for index := range objects {
+			index := index
+			g.Go(func() error {
+				objects[index].ETag = objects[index].GetActualETag(nil)
+				objects[index].Size, _ = objects[index].GetActualSize()
+				return nil
+			}, index)
+		}
+		g.Wait()
+	}
+	const maxConcurrent = 500
+	for {
+		if len(objects) < maxConcurrent {
+			inParallel(objects)
+			return
+		}
+		inParallel(objects[:maxConcurrent])
+		objects = objects[maxConcurrent:]
+	}
+}
+
 // Validate all the ListObjects query arguments, returns an APIErrorCode
 // if one of the args do not meet the required conditions.
 // Special conditions required by MinIO server are as below
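The concurrentDecryptETag helper added above fans work out in parallel but caps each wave at 500 goroutines by slicing the input. A rough standalone sketch of the same cap-and-batch idea, using only the standard library (the changeset itself uses MinIO's internal errgroup helper; processBatched below is an illustrative name):

package main

import (
	"fmt"
	"sync"
)

// processBatched runs fn over items with at most maxConcurrent
// goroutines in flight per batch, waiting between batches.
func processBatched(items []int, maxConcurrent int, fn func(n int)) {
	inParallel := func(batch []int) {
		var wg sync.WaitGroup
		for idx := range batch {
			wg.Add(1)
			go func(idx int) {
				defer wg.Done()
				fn(batch[idx])
			}(idx)
		}
		wg.Wait() // drain this batch before starting the next
	}
	for len(items) >= maxConcurrent {
		inParallel(items[:maxConcurrent])
		items = items[maxConcurrent:]
	}
	inParallel(items) // remainder, possibly empty
}

func main() {
	nums := make([]int, 12)
	for i := range nums {
		nums[i] = i
	}
	processBatched(nums, 5, func(n int) { fmt.Println("processed", n) })
}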
@@ -69,7 +93,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
 		return
 	}

-	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
+	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
 		return
 	}

@@ -89,6 +113,10 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
 		return
 	}

+	if proxyRequestByBucket(ctx, w, r, bucket) {
+		return
+	}
+
 	listObjectVersions := objectAPI.ListObjectVersions

 	// Inititate a list object versions operation based on the input params.

@@ -100,16 +128,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
 		return
 	}

-	for i := range listObjectVersionsInfo.Objects {
-		if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) {
-			listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false)
-		}
-		listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)

 	response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

@@ -178,16 +197,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
 		return
 	}

-	for i := range listObjectsV2Info.Objects {
-		if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
-			listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
-		}
-		listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectsV2Info.Objects)

 	// The next continuation token has id@node_index format to optimize paginated listing
 	nextContinuationToken := listObjectsV2Info.NextContinuationToken

@@ -264,16 +274,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
 		return
 	}

-	for i := range listObjectsV2Info.Objects {
-		if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
-			listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
-		}
-		listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectsV2Info.Objects)

 	// The next continuation token has id@node_index format to optimize paginated listing
 	nextContinuationToken := listObjectsV2Info.NextContinuationToken

@@ -396,16 +397,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 		return
 	}

-	for i := range listObjectsInfo.Objects {
-		if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
-			listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
-		}
-		listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
+	concurrentDecryptETag(ctx, listObjectsInfo.Objects)

 	response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
@@ -24,6 +24,7 @@ import (
 	"sync"

 	"github.com/minio/minio-go/v7/pkg/tags"
+	"github.com/minio/minio/cmd/logger"
 	bucketsse "github.com/minio/minio/pkg/bucket/encryption"
 	"github.com/minio/minio/pkg/bucket/lifecycle"
 	objectlock "github.com/minio/minio/pkg/bucket/object/lock"

@@ -80,7 +81,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 	// This code is needed only for gateway implementations.
 	switch configFile {
 	case bucketSSEConfig:
-		if globalGatewayName == "nas" {
+		if globalGatewayName == NASBackendGateway {
 			meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
 			if err != nil {
 				return err

@@ -89,7 +90,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 			return meta.Save(GlobalContext, objAPI)
 		}
 	case bucketLifecycleConfig:
-		if globalGatewayName == "nas" {
+		if globalGatewayName == NASBackendGateway {
 			meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
 			if err != nil {
 				return err

@@ -98,7 +99,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 			return meta.Save(GlobalContext, objAPI)
 		}
 	case bucketTaggingConfig:
-		if globalGatewayName == "nas" {
+		if globalGatewayName == NASBackendGateway {
 			meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
 			if err != nil {
 				return err

@@ -107,7 +108,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 			return meta.Save(GlobalContext, objAPI)
 		}
 	case bucketNotificationConfig:
-		if globalGatewayName == "nas" {
+		if globalGatewayName == NASBackendGateway {
 			meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
 			if err != nil {
 				return err

@@ -148,15 +149,27 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 		meta.EncryptionConfigXML = configData
 	case bucketTaggingConfig:
 		meta.TaggingConfigXML = configData
-	case objectLockConfig:
-		meta.ObjectLockConfigXML = configData
-	case bucketVersioningConfig:
-		meta.VersioningConfigXML = configData
 	case bucketQuotaConfigFile:
 		meta.QuotaConfigJSON = configData
+	case objectLockConfig:
+		if !globalIsErasure && !globalIsDistErasure {
+			return NotImplemented{}
+		}
+		meta.ObjectLockConfigXML = configData
+	case bucketVersioningConfig:
+		if !globalIsErasure && !globalIsDistErasure {
+			return NotImplemented{}
+		}
+		meta.VersioningConfigXML = configData
 	case bucketReplicationConfig:
+		if !globalIsErasure && !globalIsDistErasure {
+			return NotImplemented{}
+		}
 		meta.ReplicationConfigXML = configData
 	case bucketTargetsFile:
+		if !globalIsErasure && !globalIsDistErasure {
+			return NotImplemented{}
+		}
 		meta.BucketTargetsConfigJSON = configData
 	default:
 		return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)

@@ -176,6 +189,13 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 // If no metadata exists errConfigNotFound is returned and a new metadata is returned.
 // Only a shallow copy is returned, so referenced data should not be modified,
 // but can be replaced atomically.
+//
+// This function should only be used with
+// - GetBucketInfo
+// - ListBuckets
+// - ListBucketsHeal (only in case of erasure coding mode)
+// For all other bucket specific metadata, use the relevant
+// calls implemented specifically for each of those features.
 func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
 	if globalIsGateway || bucket == minioMetaBucket {
 		return newBucketMetadata(bucket), errConfigNotFound

@@ -253,7 +273,7 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
 // GetNotificationConfig returns configured notification config
 // The returned object may not be modified.
 func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
-	if globalIsGateway && globalGatewayName == "nas" {
+	if globalIsGateway && globalGatewayName == NASBackendGateway {
 		// Only needed in case of NAS gateway.
 		objAPI := newObjectLayerWithoutSafeModeFn()
 		if objAPI == nil {

@@ -397,8 +417,9 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
 		return nil
 	}

-	// Load PolicySys once during boot.
-	return sys.load(ctx, buckets, objAPI)
+	// Load bucket metadata sys in background
+	go logger.LogIf(ctx, sys.load(ctx, buckets, objAPI))
+	return nil
 }

 // concurrently load bucket metadata to speed up loading bucket metadata.
@@ -265,6 +265,12 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj

 		configData, err := readConfig(ctx, objectAPI, configFile)
 		if err != nil {
+			switch err.(type) {
+			case ObjectExistsAsDirectory:
+				// in FS mode it possible that we have actual
+				// files in this folder with `.minio.sys/buckets/bucket/configFile`
+				continue
+			}
 			if errors.Is(err, errConfigNotFound) {
 				// legacy file config not found, proceed to look for new metadata.
 				continue

@@ -348,7 +354,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {

 // deleteBucketMetadata deletes bucket metadata
 // If config does not exist no error is returned.
-func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
+func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
 	metadataFiles := []string{
 		dataUsageCacheName,
 		bucketMetadataFile,
@@ -68,6 +68,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
 	principalType := "Anonymous"
 	if username != "" {
 		principalType = "User"
+		if len(claims) > 0 {
+			principalType = "AssumedRole"
+		}
+		if username == globalActiveCred.AccessKey {
+			principalType = "Account"
+		}
 	}

 	vid := r.URL.Query().Get("versionId")

@@ -143,7 +149,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
 	for k, v := range claims {
 		vStr, ok := v.(string)
 		if ok {
-			args[k] = []string{vStr}
+			// Special case for AD/LDAP STS users
+			if k == ldapUser {
+				args["user"] = []string{vStr}
+			} else {
+				args[k] = []string{vStr}
+			}
 		}
 	}
@@ -18,7 +18,10 @@ package cmd

 import (
 	"context"
+	"fmt"
 	"net/http"
+	"runtime"
+	"strings"
 	"time"

 	miniogo "github.com/minio/minio-go/v7"

@@ -49,15 +52,23 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati
 // validateReplicationDestination returns error if replication destination bucket missing or not configured
 // It also returns true if replication destination is same as this server.
 func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
-	clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.ReplicationArn)
+	clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.RoleArn)
 	if clnt == nil {
 		return false, BucketRemoteTargetNotFound{Bucket: bucket}
 	}
 	if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
 		return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
 	}
+	if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
+		if ret.LockEnabled {
+			lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, rCfg.GetDestination().Bucket)
+			if err != nil || lock != "Enabled" {
+				return false, BucketReplicationDestinationMissingLock{Bucket: rCfg.GetDestination().Bucket}
+			}
+		}
+	}
 	// validate replication ARN against target endpoint
-	c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.ReplicationArn]
+	c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.RoleArn]
 	if ok {
 		if c.EndpointURL().String() == clnt.EndpointURL().String() {
 			sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)

@@ -67,8 +78,23 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
 	return false, BucketRemoteTargetNotFound{Bucket: bucket}
 }

+func mustReplicateWeb(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string, permErr APIErrorCode) bool {
+	if permErr != ErrNone {
+		return false
+	}
+	return mustReplicater(ctx, r, bucket, object, meta, replStatus)
+}
+
 // mustReplicate returns true if object meets replication criteria.
 func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
+	if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
+		return false
+	}
+	return mustReplicater(ctx, r, bucket, object, meta, replStatus)
+}
+
+// mustReplicater returns true if object meets replication criteria.
+func mustReplicater(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
 	if globalIsGateway {
 		return false
 	}

@@ -78,9 +104,6 @@ func mustReplicate(ctx context.Context, r *http.Request, bucket, object string,
 	if replication.StatusType(replStatus) == replication.Replica {
 		return false
 	}
-	if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
-		return false
-	}
 	cfg, err := getReplicationConfig(ctx, bucket)
 	if err != nil {
 		return false

@@ -102,22 +125,29 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp
 		if k == xhttp.AmzBucketReplicationStatus {
 			continue
 		}
+		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+			continue
+		}
 		meta[k] = v
 	}

 	tag, err := tags.ParseObjectTags(objInfo.UserTags)
 	if err != nil {
 		return
 	}
+	sc := dest.StorageClass
+	if sc == "" {
+		sc = objInfo.StorageClass
+	}
 	putOpts = miniogo.PutObjectOptions{
 		UserMetadata:         meta,
 		UserTags:             tag.ToMap(),
 		ContentType:          objInfo.ContentType,
 		ContentEncoding:      objInfo.ContentEncoding,
-		StorageClass:         dest.StorageClass,
+		StorageClass:         sc,
 		ReplicationVersionID: objInfo.VersionID,
 		ReplicationStatus:    miniogo.ReplicationStatusReplica,
 		ReplicationMTime:     objInfo.ModTime,
 		ReplicationETag:      objInfo.ETag,
 	}
 	if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
 		rmode := miniogo.RetentionMode(mode)

@@ -141,38 +171,53 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp

 // replicateObject replicates the specified version of the object to destination bucket
 // The source object is then updated to reflect the replication status.
-func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
+func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
+	bucket := objInfo.Bucket
+	object := objInfo.Name
+
 	cfg, err := getReplicationConfig(ctx, bucket)
 	if err != nil {
 		logger.LogIf(ctx, err)
 		return
 	}
-	tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.ReplicationArn)
+
+	tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.RoleArn)
 	if tgt == nil {
 		return
 	}
-	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
+
+	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
+		VersionID: objInfo.VersionID,
+	})
 	if err != nil {
 		return
 	}
-	objInfo := gr.ObjInfo
+	defer gr.Close()
+
+	objInfo = gr.ObjInfo
 	size, err := objInfo.GetActualSize()
 	if err != nil {
 		logger.LogIf(ctx, err)
-		gr.Close()
 		return
 	}

 	dest := cfg.GetDestination()
 	if dest.Bucket == "" {
-		gr.Close()
 		return
 	}

+	// if heal encounters a pending replication status, either replication
+	// has failed due to server shutdown or crawler and PutObject replication are in contention.
+	healPending := objInfo.ReplicationStatus == replication.Pending
+
 	// In the rare event that replication is in pending state either due to
 	// server shut down/crash before replication completed or healing and PutObject
 	// race - do an additional stat to see if the version ID exists
 	if healPending {
 		_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
 		if err == nil {
-			gr.Close()
 			// object with same VersionID already exists, replication kicked off by
 			// PutObject might have completed.
 			return

@@ -182,28 +227,114 @@ func replicateObject(ctx context.Context, bucket, object, versionID string, obje

 	replicationStatus := replication.Complete
 	_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)
-	gr.Close()
 	if err != nil {
 		replicationStatus = replication.Failed
-		// Notify replication failure event.
-		if eventArg == nil {
-			eventArg = &eventArgs{
-				BucketName: bucket,
-				Object:     objInfo,
-				Host:       "Internal: [Replication]",
-			}
-		}
-		eventArg.EventName = event.OperationReplicationFailed
-		eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
-		sendEvent(*eventArg)
 	}
 	objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
 	if objInfo.UserTags != "" {
 		objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
 	}

+	// FIXME: add support for missing replication events
+	// - event.ObjectReplicationNotTracked
+	// - event.ObjectReplicationMissedThreshold
+	// - event.ObjectReplicationReplicatedAfterThreshold
+	if replicationStatus == replication.Failed {
+		sendEvent(eventArgs{
+			EventName:  event.ObjectReplicationFailed,
+			BucketName: bucket,
+			Object:     objInfo,
+			Host:       "Internal: [Replication]",
+		})
+	}
+
 	objInfo.metadataOnly = true // Perform only metadata updates.
 	if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
 		VersionID: objInfo.VersionID,
-	}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
-		logger.LogIf(ctx, err)
+	}, ObjectOptions{
+		VersionID: objInfo.VersionID,
+	}); err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
 	}
 }
+// filterReplicationStatusMetadata filters replication status metadata for COPY
+func filterReplicationStatusMetadata(metadata map[string]string) map[string]string {
+	// Copy on write
+	dst := metadata
+	var copied bool
+	delKey := func(key string) {
+		if _, ok := metadata[key]; !ok {
+			return
+		}
+		if !copied {
+			dst = make(map[string]string, len(metadata))
+			for k, v := range metadata {
+				dst[k] = v
+			}
+			copied = true
+		}
+		delete(dst, key)
+	}
+
+	delKey(xhttp.AmzBucketReplicationStatus)
+	return dst
+}
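The delKey closure above is a copy-on-write filter: the input map is duplicated only once a key actually has to be dropped, so the common no-op case allocates nothing and the caller's map is never mutated. A small self-contained sketch of the same trick (withoutKeys is an illustrative name, not part of the changeset):

package main

import "fmt"

// withoutKeys returns src minus the given keys, copying src lazily.
func withoutKeys(src map[string]string, keys ...string) map[string]string {
	dst := src
	copied := false
	for _, key := range keys {
		if _, ok := src[key]; !ok {
			continue // key absent: nothing to remove, no copy needed
		}
		if !copied {
			dst = make(map[string]string, len(src))
			for k, v := range src {
				dst[k] = v
			}
			copied = true
		}
		delete(dst, key)
	}
	return dst
}

func main() {
	m := map[string]string{"a": "1", "b": "2"}
	fmt.Println(withoutKeys(m, "missing")) // same map returned, no copy made
	fmt.Println(withoutKeys(m, "a"))       // copy without "a"
	fmt.Println(m)                         // original map left intact
}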
+type replicationState struct {
+	// add future metrics here
+	replicaCh chan ObjectInfo
+}
+
+func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
+	select {
+	case r.replicaCh <- oi:
+	default:
+	}
+}
+
+var (
+	globalReplicationState *replicationState
+	// TODO: currently keeping it conservative
+	// but eventually can be tuned in future,
+	// take only half the CPUs for replication
+	// conservatively.
+	globalReplicationConcurrent = runtime.GOMAXPROCS(0) / 2
+)
+
+func newReplicationState() *replicationState {
+	return &replicationState{
+		replicaCh: make(chan ObjectInfo, globalReplicationConcurrent*2),
+	}
+}
+
+// addWorker creates a new worker to process tasks
+func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
+	// Add a new worker.
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				close(r.replicaCh)
+				return
+			case oi, ok := <-r.replicaCh:
+				if !ok {
+					return
+				}
+				replicateObject(ctx, oi, objectAPI)
+			}
+		}
+	}()
+}
+
+func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
+	if globalReplicationState == nil {
+		return
+	}
+
+	// Start with globalReplicationConcurrent.
+	for i := 0; i < globalReplicationConcurrent; i++ {
+		globalReplicationState.addWorker(ctx, objectAPI)
+	}
+}
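queueReplicaTask and addWorker above form a bounded worker pool: a buffered channel sized at twice the worker count, plus a non-blocking send that silently drops work when the queue is full. A compact standalone sketch of the same shape, with Task as a stand-in payload type (the sketch deliberately lets workers just return on cancellation rather than closing the shared channel from multiple goroutines):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type Task int

// startWorkers launches n goroutines that drain ch until cancelled.
func startWorkers(ctx context.Context, n int, ch <-chan Task, wg *sync.WaitGroup) {
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case t, ok := <-ch:
					if !ok {
						return
					}
					fmt.Println("processed task", t)
				}
			}
		}()
	}
}

func main() {
	const workers = 2
	ch := make(chan Task, workers*2) // queue depth: 2x worker count
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	startWorkers(ctx, workers, ch, &wg)
	for i := 0; i < 10; i++ {
		select {
		case ch <- Task(i): // enqueued
		default: // queue full: drop, giving best-effort semantics
			fmt.Println("queue full, dropping task", i)
		}
	}
	time.Sleep(100 * time.Millisecond)
	cancel()
	wg.Wait()
}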
@@ -18,10 +18,10 @@ package cmd

 import (
 	"context"
-	"fmt"
 	"net/http"
 	"sync"

+	minio "github.com/minio/minio-go/v7"
 	miniogo "github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/minio/minio/pkg/bucket/versioning"

@@ -36,16 +36,42 @@ type BucketTargetSys struct {
 	clientsCache map[string]*miniogo.Core
 }

-// ListTargets - gets list of bucket targets for this bucket.
-func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket string) (*madmin.BucketTargets, error) {
-	if globalIsGateway {
-		return nil, nil
+// ListTargets lists bucket targets across tenant or for individual bucket, and returns
+// results filtered by arnType
+func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType string) (targets []madmin.BucketTarget) {
+	if bucket != "" {
+		if ts, err := sys.ListBucketTargets(ctx, bucket); err == nil {
+			for _, t := range ts.Targets {
+				if string(t.Type) == arnType || arnType == "" {
+					targets = append(targets, t.Clone())
+				}
+			}
+		}
+		return targets
 	}
+	sys.RLock()
+	defer sys.RUnlock()
+	for _, tgts := range sys.targetsMap {
+		for _, t := range tgts {
+			if string(t.Type) == arnType || arnType == "" {
+				targets = append(targets, t.Clone())
+			}
+		}
+	}
+	return
+}

+// ListBucketTargets - gets list of bucket targets for this bucket.
+func (sys *BucketTargetSys) ListBucketTargets(ctx context.Context, bucket string) (*madmin.BucketTargets, error) {
 	sys.RLock()
 	defer sys.RUnlock()

 	tgts, ok := sys.targetsMap[bucket]
 	if ok {
 		return &madmin.BucketTargets{Targets: tgts}, nil
 	}
-	return nil, fmt.Errorf("No remote targets exist for bucket %s", bucket)
+	return nil, BucketRemoteTargetNotFound{Bucket: bucket}
 }

 // SetTarget - sets a new minio-go client target for this bucket.

@@ -60,13 +86,18 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
 	if err != nil {
 		return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
 	}
-	if tgt.Type == madmin.ReplicationArn {
+	if tgt.Type == madmin.ReplicationService {
 		if !globalBucketVersioningSys.Enabled(bucket) {
 			return BucketReplicationSourceNotVersioned{Bucket: bucket}
 		}
 		vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
-		if err != nil || vcfg.Status != string(versioning.Enabled) {
-			if isErrBucketNotFound(err) {
+		if err != nil {
+			if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
 				return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
 			}
+			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+		}
+		if vcfg.Status != string(versioning.Enabled) {
 			return BucketReplicationTargetNotVersioned{Bucket: tgt.TargetBucket}
 		}
 	}

@@ -112,10 +143,10 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
 	if err != nil {
 		return BucketRemoteArnInvalid{Bucket: bucket}
 	}
-	if arn.Type == madmin.ReplicationArn {
+	if arn.Type == madmin.ReplicationService {
 		// reject removal of remote target if replication configuration is present
 		rcfg, err := getReplicationConfig(ctx, bucket)
-		if err == nil && rcfg.ReplicationArn == arnStr {
+		if err == nil && rcfg.RoleArn == arnStr {
 			if _, ok := sys.arnRemotesMap[arnStr]; ok {
 				return BucketRemoteRemoveDisallowed{Bucket: bucket}
 			}

@@ -125,11 +156,17 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
 	sys.Lock()
 	defer sys.Unlock()
 	targets := make([]madmin.BucketTarget, 0)
+	found := false
 	tgts := sys.targetsMap[bucket]
 	for _, tgt := range tgts {
 		if tgt.Arn != arnStr {
 			targets = append(targets, tgt)
+			continue
 		}
+		found = true
 	}
+	if !found {
+		return BucketRemoteTargetNotFound{Bucket: bucket}
+	}
 	sys.targetsMap[bucket] = targets
 	delete(sys.arnRemotesMap, arnStr)

@@ -163,11 +200,45 @@ func (sys *BucketTargetSys) Init(ctx context.Context, buckets []BucketInfo, objA
 		return nil
 	}

-	// Load bucket targets once during boot.
-	sys.load(ctx, buckets, objAPI)
+	// Load bucket targets once during boot in background.
+	go sys.load(ctx, buckets, objAPI)
 	return nil
 }

+// UpdateTarget updates target to reflect metadata updates
+func (sys *BucketTargetSys) UpdateTarget(bucket string, cfg *madmin.BucketTargets) {
+	if sys == nil {
+		return
+	}
+	sys.Lock()
+	defer sys.Unlock()
+	if cfg == nil || cfg.Empty() {
+		// remove target and arn association
+		if tgts, ok := sys.targetsMap[bucket]; ok {
+			for _, t := range tgts {
+				delete(sys.arnRemotesMap, t.Arn)
+			}
+		}
+		delete(sys.targetsMap, bucket)
+		return
+	}
+
+	if len(cfg.Targets) > 0 {
+		sys.targetsMap[bucket] = cfg.Targets
+	}
+	for _, tgt := range cfg.Targets {
+		tgtClient, err := sys.getRemoteTargetClient(&tgt)
+		if err != nil {
+			continue
+		}
+		sys.arnRemotesMap[tgt.Arn] = tgtClient
+		if _, ok := sys.clientsCache[tgtClient.EndpointURL().String()]; !ok {
+			sys.clientsCache[tgtClient.EndpointURL().String()] = tgtClient
+		}
+	}
+	sys.targetsMap[bucket] = cfg.Targets
+}
+
 // create minio-go clients for buckets having remote targets
 func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
 	for _, bucket := range buckets {

@@ -229,7 +300,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
 			return tgt.Arn
 		}
 	}
-	if !madmin.ArnType(target.Type).IsValid() {
+	if !madmin.ServiceType(target.Type).IsValid() {
 		return ""
 	}
 	arn := madmin.ARN{
@@ -20,6 +20,7 @@ import (
 	"crypto/x509"
 	"encoding/gob"
 	"errors"
+	"fmt"
 	"net"
 	"net/url"
 	"os"

@@ -42,15 +43,11 @@ func init() {
 	logger.Init(GOPATH, GOROOT)
 	logger.RegisterError(config.FmtError)

-	// Initialize globalConsoleSys system
-	globalConsoleSys = NewConsoleLogger(GlobalContext)
-	logger.AddTarget(globalConsoleSys)
-
 	gob.Register(StorageErr(""))
 }

 func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
-	if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
+	if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
 		logger.Fatal(errInvalidArgument,
 			"Encryption support is requested but '%s' does not support encryption", name)
 	}

@@ -293,7 +290,7 @@ func logStartupMessage(msg string) {
 	logger.StartupMessage(msg)
 }

-func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
+func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
 	if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
 		return nil, nil, false, nil
 	}

@@ -302,11 +299,62 @@ func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn b
 		return nil, nil, false, err
 	}

-	c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
+	manager, err = certs.NewManager(GlobalContext, getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
 	if err != nil {
 		return nil, nil, false, err
 	}

+	// MinIO has support for multiple certificates. It expects the following structure:
+	//  certs/
+	//   │
+	//   ├─ public.crt
+	//   ├─ private.key
+	//   │
+	//   ├─ example.com/
+	//   │   │
+	//   │   ├─ public.crt
+	//   │   └─ private.key
+	//   └─ foobar.org/
+	//      │
+	//      ├─ public.crt
+	//      └─ private.key
+	//   ...
+	//
+	// Therefore, we read all filenames in the cert directory and check
+	// for each directory whether it contains a public.crt and private.key.
+	// If so, we try to add it to certificate manager.
+	root, err := os.Open(globalCertsDir.Get())
+	if err != nil {
+		return nil, nil, false, err
+	}
+	defer root.Close()
+
+	files, err := root.Readdir(-1)
+	if err != nil {
+		return nil, nil, false, err
+	}
+	for _, file := range files {
+		// We exclude any regular file and the "CAs/" directory.
+		// The "CAs/" directory contains (root) CA certificates
+		// that MinIO adds to its list of trusted roots (tls.Config.RootCAs).
+		// Therefore, "CAs/" does not contain X.509 certificates that
+		// are meant to be served by MinIO.
+		if !file.IsDir() || file.Name() == "CAs" {
+			continue
+		}
+
+		var (
+			certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
+			keyFile  = filepath.Join(root.Name(), file.Name(), privateKeyFile)
+		)
+		if !isFile(certFile) || !isFile(keyFile) {
+			continue
+		}
+		if err := manager.AddCertificate(certFile, keyFile); err != nil {
+			err = fmt.Errorf("Failed to load TLS certificate '%s': %v", certFile, err)
+			logger.LogIf(GlobalContext, err, logger.Minio)
+		}
+	}
 	secureConn = true
-	return x509Certs, c, secureConn, nil
+	return x509Certs, manager, secureConn, nil
 }
@@ -21,7 +21,6 @@ import (
 	"context"
 	"errors"

-	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/hash"
 )

@@ -36,8 +35,6 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
 		return nil, errConfigNotFound
 	}

-	logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
-	logger.LogIf(ctx, err)
 	return nil, err
 }

@@ -49,7 +46,11 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
 	return buffer.Bytes(), nil
 }

-func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
+type objectDeleter interface {
+	DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
+}
+
+func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) error {
 	_, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
 	if err != nil && isErrObjectNotFound(err) {
 		return errConfigNotFound

@@ -74,8 +75,6 @@ func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) err
 		return errConfigNotFound
 	}

-		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
-		logger.LogIf(ctx, err)
 		return err
 	}
 	return nil
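Narrowing deleteConfig's parameter from the full ObjectLayer to the one-method objectDeleter interface is the usual consumer-side interface idiom in Go: callers can still pass the full object layer, while tests can pass a tiny stub. A hedged sketch of the idiom with illustrative names (deleter, cleanup, and fakeStore are not from this changeset):

package main

import "fmt"

// deleter is the minimal capability cleanup actually needs.
type deleter interface {
	Delete(name string) error
}

func cleanup(d deleter, names []string) {
	for _, n := range names {
		if err := d.Delete(n); err != nil {
			fmt.Println("delete failed:", err)
		}
	}
}

// fakeStore is a minimal stub satisfying deleter — handy in unit tests.
type fakeStore map[string]bool

func (f fakeStore) Delete(name string) error {
	delete(f, name)
	return nil
}

func main() {
	store := fakeStore{"a": true, "b": true}
	cleanup(store, []string{"a", "b"})
	fmt.Println("remaining:", len(store)) // 0
}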
@@ -25,8 +25,9 @@ import (
 	"github.com/minio/minio/cmd/config/api"
 	"github.com/minio/minio/cmd/config/cache"
 	"github.com/minio/minio/cmd/config/compress"
+	"github.com/minio/minio/cmd/config/crawler"
+	"github.com/minio/minio/cmd/config/dns"
 	"github.com/minio/minio/cmd/config/etcd"
-	"github.com/minio/minio/cmd/config/etcd/dns"
 	xldap "github.com/minio/minio/cmd/config/identity/ldap"
 	"github.com/minio/minio/cmd/config/identity/openid"
 	"github.com/minio/minio/cmd/config/notify"

@@ -55,6 +56,7 @@ func initHelp() {
 		config.KmsKesSubSys:        crypto.DefaultKesKVS,
 		config.LoggerWebhookSubSys: logger.DefaultKVS,
 		config.AuditWebhookSubSys:  logger.DefaultAuditKVS,
+		config.CrawlerSubSys:       crawler.DefaultKVS,
 	}
 	for k, v := range notify.DefaultNotificationKVS {
 		kvs[k] = v

@@ -106,6 +108,10 @@ func initHelp() {
 			Key:         config.APISubSys,
 			Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
 		},
+		config.HelpKV{
+			Key:         config.CrawlerSubSys,
+			Description: "manage continuous disk crawling for bucket disk usage, lifecycle, quota and data integrity checks",
+		},
 		config.HelpKV{
 			Key:         config.LoggerWebhookSubSys,
 			Description: "send server logs to webhook endpoints",

@@ -185,6 +191,7 @@ func initHelp() {
 		config.EtcdSubSys:           etcd.Help,
 		config.CacheSubSys:          cache.Help,
 		config.CompressionSubSys:    compress.Help,
+		config.CrawlerSubSys:        crawler.Help,
 		config.IdentityOpenIDSubSys: openid.Help,
 		config.IdentityLDAPSubSys:   xldap.Help,
 		config.PolicyOPASubSys:      opa.Help,

@@ -213,7 +220,7 @@ var (
 	globalServerConfigMu sync.RWMutex
 )

-func validateConfig(s config.Config) error {
+func validateConfig(s config.Config, setDriveCount int) error {
 	// Disable merging env values with config for validation.
 	env.SetEnvOff()

@@ -233,8 +240,7 @@ func validateConfig(s config.Config) error {
 	}

 	if globalIsErasure {
-		if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
-			globalErasureSetDriveCount); err != nil {
+		if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
 			return err
 		}
 	}

@@ -247,6 +253,10 @@ func validateConfig(s config.Config) error {
 		return err
 	}

+	if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
+		return err
+	}
+
 	{
 		etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
 		if err != nil {

@@ -307,11 +317,10 @@ func validateConfig(s config.Config) error {
 		return err
 	}

-	return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
-		globalNotificationSys.ConfiguredTargetIDs())
+	return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
 }

-func lookupConfigs(s config.Config) {
+func lookupConfigs(s config.Config, setDriveCount int) {
 	ctx := GlobalContext

 	var err error

@@ -323,6 +332,19 @@ func lookupConfigs(s config.Config) {
 		}
 	}

+	if dnsURL, dnsUser, dnsPass, ok := env.LookupEnv(config.EnvDNSWebhook); ok {
+		globalDNSConfig, err = dns.NewOperatorDNS(dnsURL,
+			dns.Authentication(dnsUser, dnsPass),
+			dns.RootCAs(globalRootCAs))
+		if err != nil {
+			if globalIsGateway {
+				logger.FatalIf(err, "Unable to initialize remote webhook DNS config")
+			} else {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
+			}
+		}
+	}
+
 	etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
 	if err != nil {
 		if globalIsGateway {

@@ -344,19 +366,25 @@ func lookupConfigs(s config.Config) {
 		}
 	}

-	if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil && globalDNSConfig == nil {
-		globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
-			dns.DomainNames(globalDomainNames),
-			dns.DomainIPs(globalDomainIPs),
-			dns.DomainPort(globalMinioPort),
-			dns.CoreDNSPath(etcdCfg.CoreDNSPath),
-		)
-		if err != nil {
-			if globalIsGateway {
-				logger.FatalIf(err, "Unable to initialize DNS config")
-			} else {
-				logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
-					globalDomainNames, err))
-			}
-		}
+	if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
+		if globalDNSConfig != nil {
+			// if global DNS is already configured, indicate with a warning, incase
+			// users are confused.
+			logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, not using etcd for DNS store", globalDNSConfig))
+		} else {
+			globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
+				dns.DomainNames(globalDomainNames),
+				dns.DomainIPs(globalDomainIPs),
+				dns.DomainPort(globalMinioPort),
+				dns.CoreDNSPath(etcdCfg.CoreDNSPath),
+			)
+			if err != nil {
+				if globalIsGateway {
+					logger.FatalIf(err, "Unable to initialize DNS config")
+				} else {
+					logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
+						globalDomainNames, err))
+				}
+			}
+		}
 	}

@@ -379,11 +407,15 @@ func lookupConfigs(s config.Config) {
 		logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
 	}

-	globalAPIConfig.init(apiConfig)
+	globalAPIConfig.init(apiConfig, setDriveCount)
+
+	// Initialize remote instance transport once.
+	getRemoteInstanceTransportOnce.Do(func() {
+		getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
+	})

 	if globalIsErasure {
-		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
-			globalErasureSetDriveCount)
+		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
 		if err != nil {
 			logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
 		}

@@ -406,6 +438,10 @@ func lookupConfigs(s config.Config) {
 			}
 		}
 	}
+	globalCrawlerConfig, err = crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
+	if err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Unable to read crawler config: %w", err))
+	}

 	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
 	if err != nil {

@@ -419,6 +455,9 @@ func lookupConfigs(s config.Config) {

 	// Enable auto-encryption if enabled
 	globalAutoEncryption = kmsCfg.AutoEncryption
+	if globalAutoEncryption && !globalIsGateway {
+		logger.LogIf(ctx, fmt.Errorf("%s env is deprecated please migrate to using `mc encrypt` at bucket level", crypto.EnvKMSAutoEncryption))
+	}

 	globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
 	if err != nil {

@@ -457,37 +496,41 @@ func lookupConfigs(s config.Config) {
 	for _, l := range loggerCfg.HTTP {
 		if l.Enabled {
 			// Enable http logging
-			logger.AddTarget(
+			if err = logger.AddTarget(
 				http.New(http.WithEndpoint(l.Endpoint),
 					http.WithAuthToken(l.AuthToken),
 					http.WithUserAgent(loggerUserAgent),
 					http.WithLogKind(string(logger.All)),
 					http.WithTransport(NewGatewayHTTPTransport()),
 				),
-			)
+			); err != nil {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize console HTTP target: %w", err))
+			}
 		}
 	}

 	for _, l := range loggerCfg.Audit {
 		if l.Enabled {
 			// Enable http audit logging
-			logger.AddAuditTarget(
+			if err = logger.AddAuditTarget(
 				http.New(http.WithEndpoint(l.Endpoint),
 					http.WithAuthToken(l.AuthToken),
 					http.WithUserAgent(loggerUserAgent),
 					http.WithLogKind(string(logger.All)),
 					http.WithTransport(NewGatewayHTTPTransport()),
 				),
-			)
+			); err != nil {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
+			}
 		}
 	}

-	globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(), false)
+	globalConfigTargetList, err = notify.GetNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), false)
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
 	}

-	globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport(), true)
+	globalEnvTargetList, err = notify.GetNotificationTargets(GlobalContext, newServerConfig(), NewGatewayHTTPTransport(), true)
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
 	}

@@ -601,7 +644,7 @@ func loadConfig(objAPI ObjectLayer) error {
 	}

 	// Override any values from ENVs.
-	lookupConfigs(srvCfg)
+	lookupConfigs(srvCfg, objAPI.SetDriveCount())

 	// hold the mutex lock before a new config is assigned.
 	globalServerConfigMu.Lock()
@@ -29,15 +29,17 @@ import (
|
||||
|
||||
// API sub-system constants
|
||||
const (
|
||||
apiRequestsMax = "requests_max"
|
||||
apiRequestsDeadline = "requests_deadline"
|
||||
apiReadyDeadline = "ready_deadline"
|
||||
apiCorsAllowOrigin = "cors_allow_origin"
|
||||
apiRequestsMax = "requests_max"
|
||||
apiRequestsDeadline = "requests_deadline"
|
||||
apiReadyDeadline = "ready_deadline"
|
||||
apiCorsAllowOrigin = "cors_allow_origin"
|
||||
apiRemoteTransportDeadline = "remote_transport_deadline"
|
||||
|
||||
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
|
||||
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
|
||||
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
|
||||
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
|
||||
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
|
||||
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
|
||||
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
|
||||
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
|
||||
EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
|
||||
)
|
||||
|
||||
// DefaultKVS - default storage class config
|
||||
@@ -59,15 +61,20 @@ var (
|
||||
Key: apiCorsAllowOrigin,
|
||||
Value: "*",
|
||||
},
|
||||
config.KV{
|
||||
Key: apiRemoteTransportDeadline,
|
||||
Value: "2h",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// Config storage class configuration
|
||||
type Config struct {
|
||||
APIRequestsMax int `json:"requests_max"`
|
||||
APIRequestsDeadline time.Duration `json:"requests_deadline"`
|
||||
APIReadyDeadline time.Duration `json:"ready_deadline"`
|
||||
APICorsAllowOrigin []string `json:"cors_allow_origin"`
|
||||
RequestsMax int `json:"requests_max"`
|
||||
RequestsDeadline time.Duration `json:"requests_deadline"`
|
||||
ReadyDeadline time.Duration `json:"ready_deadline"`
|
||||
CorsAllowOrigin []string `json:"cors_allow_origin"`
|
||||
RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
|
||||
@@ -108,10 +115,17 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
|
||||
}
|
||||
|
||||
corsAllowOrigin := strings.Split(env.Get(EnvAPICorsAllowOrigin, kvs.Get(apiCorsAllowOrigin)), ",")
|
||||
|
||||
remoteTransportDeadline, err := time.ParseDuration(env.Get(EnvAPIRemoteTransportDeadline, kvs.Get(apiRemoteTransportDeadline)))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
return Config{
|
||||
APIRequestsMax: requestsMax,
|
||||
APIRequestsDeadline: requestsDeadline,
|
||||
APIReadyDeadline: readyDeadline,
|
||||
APICorsAllowOrigin: corsAllowOrigin,
|
||||
RequestsMax: requestsMax,
|
||||
RequestsDeadline: requestsDeadline,
|
||||
ReadyDeadline: readyDeadline,
|
||||
CorsAllowOrigin: corsAllowOrigin,
|
||||
RemoteTransportDeadline: remoteTransportDeadline,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -39,5 +39,11 @@ var (
|
||||
Optional: true,
|
||||
Type: "csv",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: apiRemoteTransportDeadline,
|
||||
Description: `set the deadline for API requests on remote transports while proxying between federated instances e.g. "2h"`,
|
||||
Optional: true,
|
||||
Type: "duration",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
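The `env.Get(ENV, kvs.Get(key))` pattern above resolves every API setting from the environment first and falls back to the stored config value. A minimal self-contained sketch of that resolution order (not part of the diff; `os.LookupEnv` stands in for MinIO's `env` package):

package main

import (
	"fmt"
	"os"
	"time"
)

// envOrDefault mirrors env.Get(envKey, stored): an environment
// variable, if set, overrides the stored config value.
func envOrDefault(envKey, stored string) string {
	if v, ok := os.LookupEnv(envKey); ok {
		return v
	}
	return stored
}

func main() {
	// Stored default from DefaultKVS is "2h"; the ENV can override it.
	raw := envOrDefault("MINIO_API_REMOTE_TRANSPORT_DEADLINE", "2h")
	d, err := time.ParseDuration(raw)
	if err != nil {
		fmt.Println("invalid duration:", err)
		return
	}
	fmt.Println("remote transport deadline:", d)
}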
@@ -24,8 +24,6 @@ import (
     "crypto/x509"
     "encoding/pem"
     "io/ioutil"
-    "os"
-    "path"

     "github.com/minio/minio/pkg/env"
 )
@@ -69,38 +67,6 @@ func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
     return x509Certs, nil
 }

-// GetRootCAs - returns all the root CAs into certPool
-// at the input certsCADir
-func GetRootCAs(certsCAsDir string) (*x509.CertPool, error) {
-    rootCAs, _ := x509.SystemCertPool()
-    if rootCAs == nil {
-        // In some systems (like Windows) system cert pool is
-        // not supported or no certificates are present on the
-        // system - so we create a new cert pool.
-        rootCAs = x509.NewCertPool()
-    }
-
-    fis, err := ioutil.ReadDir(certsCAsDir)
-    if err != nil {
-        if os.IsNotExist(err) || os.IsPermission(err) {
-            // Return success if CA's directory is missing or permission denied.
-            err = nil
-        }
-        return rootCAs, err
-    }
-
-    // Load all custom CA files.
-    for _, fi := range fis {
-        caCert, err := ioutil.ReadFile(path.Join(certsCAsDir, fi.Name()))
-        if err != nil {
-            // ignore files which are not readable.
-            continue
-        }
-        rootCAs.AppendCertsFromPEM(caCert)
-    }
-    return rootCAs, nil
-}
-
 // LoadX509KeyPair - load an X509 key pair (private key , certificate)
 // from the provided paths. The private key may be encrypted and is
 // decrypted using the ENV_VAR: MINIO_CERT_PASSWD.

@@ -20,7 +20,6 @@ import (
     "fmt"
     "io/ioutil"
     "os"
-    "path/filepath"
     "runtime"
     "testing"
 )
@@ -194,60 +193,6 @@ M9ofSEt/bdRD
     }
 }

-func TestGetRootCAs(t *testing.T) {
-    emptydir, err := ioutil.TempDir("", "test-get-root-cas")
-    if err != nil {
-        t.Fatalf("Unable create temp directory. %v", emptydir)
-    }
-    defer os.RemoveAll(emptydir)
-
-    dir1, err := ioutil.TempDir("", "test-get-root-cas")
-    if err != nil {
-        t.Fatalf("Unable create temp directory. %v", dir1)
-    }
-    defer os.RemoveAll(dir1)
-    if err = os.Mkdir(filepath.Join(dir1, "empty-dir"), 0755); err != nil {
-        t.Fatalf("Unable create empty dir. %v", err)
-    }
-
-    dir2, err := ioutil.TempDir("", "test-get-root-cas")
-    if err != nil {
-        t.Fatalf("Unable create temp directory. %v", dir2)
-    }
-    defer os.RemoveAll(dir2)
-    if err = ioutil.WriteFile(filepath.Join(dir2, "empty-file"), []byte{}, 0644); err != nil {
-        t.Fatalf("Unable create test file. %v", err)
-    }
-
-    testCases := []struct {
-        certCAsDir  string
-        expectedErr error
-    }{
-        // ignores non-existent directories.
-        {"nonexistent-dir", nil},
-        // Ignores directories.
-        {dir1, nil},
-        // Ignore empty directory.
-        {emptydir, nil},
-        // Loads the cert properly.
-        {dir2, nil},
-    }
-
-    for _, testCase := range testCases {
-        _, err := GetRootCAs(testCase.certCAsDir)
-
-        if testCase.expectedErr == nil {
-            if err != nil {
-                t.Fatalf("error: expected = <nil>, got = %v", err)
-            }
-        } else if err == nil {
-            t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
-        } else if testCase.expectedErr.Error() != err.Error() {
-            t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
-        }
-    }
-}
-
 func TestLoadX509KeyPair(t *testing.T) {
     for i, testCase := range loadX509KeyPairTests {
         privateKey, err := createTempFile("private.key", testCase.privateKey)

@@ -76,6 +76,7 @@ const (
     KmsKesSubSys        = "kms_kes"
     LoggerWebhookSubSys = "logger_webhook"
     AuditWebhookSubSys  = "audit_webhook"
+    CrawlerSubSys       = "crawler"

     // Add new constants here if you add new fields to config.
 )
@@ -112,6 +113,7 @@ var SubSystems = set.CreateStringSet([]string{
     PolicyOPASubSys,
     IdentityLDAPSubSys,
     IdentityOpenIDSubSys,
+    CrawlerSubSys,
     NotifyAMQPSubSys,
     NotifyESSubSys,
     NotifyKafkaSubSys,
@@ -138,6 +140,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
     PolicyOPASubSys,
     IdentityLDAPSubSys,
     IdentityOpenIDSubSys,
+    CrawlerSubSys,
 }...)

 // Constant separators
@@ -448,6 +451,11 @@ func (c Config) Merge() Config {
                 ckvs.Set(kv.Key, kv.Value)
             }
         }
+        if _, ok := cp[subSys]; !ok {
+            // A config subsystem was removed or server was downgraded.
+            Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
+            continue
+        }
         cp[subSys][tgt] = ckvs
     }
 }

@@ -31,11 +31,13 @@ const (
     EnvDomain     = "MINIO_DOMAIN"
     EnvRegionName = "MINIO_REGION_NAME"
     EnvPublicIPs  = "MINIO_PUBLIC_IPS"
-    EnvEndpoints  = "MINIO_ENDPOINTS"
     EnvFSOSync    = "MINIO_FS_OSYNC"
+    EnvArgs       = "MINIO_ARGS"
+    EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"

     EnvUpdate = "MINIO_UPDATE"

-    EnvWorm   = "MINIO_WORM"   // legacy
-    EnvRegion = "MINIO_REGION" // legacy
+    EnvEndpoints = "MINIO_ENDPOINTS" // legacy
+    EnvWorm      = "MINIO_WORM"      // legacy
+    EnvRegion    = "MINIO_REGION"    // legacy
 )
cmd/config/crawler/crawler.go (new file, +67 lines)
@@ -0,0 +1,67 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package crawler
+
+import (
+    "errors"
+
+    "github.com/minio/minio/cmd/config"
+)
+
+// Compression environment variables
+const (
+    BitrotScan = "bitrotscan"
+)
+
+// Config represents the crawler settings.
+type Config struct {
+    // Bitrot will perform bitrot scan on local disk when checking objects.
+    Bitrot bool `json:"bitrotscan"`
+}
+
+var (
+    // DefaultKVS - default KV config for crawler settings
+    DefaultKVS = config.KVS{
+        config.KV{
+            Key:   BitrotScan,
+            Value: config.EnableOff,
+        },
+    }
+
+    // Help provides help for config values
+    Help = config.HelpKVS{
+        config.HelpKV{
+            Key:         BitrotScan,
+            Description: `perform bitrot scan on disks when checking objects during crawl`,
+            Optional:    true,
+            Type:        "on|off",
+        },
+    }
+)
+
+// LookupConfig - lookup config and override with valid environment settings if any.
+func LookupConfig(kvs config.KVS) (cfg Config, err error) {
+    if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
+        return cfg, err
+    }
+    bitrot := kvs.Get(BitrotScan)
+    if bitrot != config.EnableOn && bitrot != config.EnableOff {
+        return cfg, errors.New(BitrotScan + ": must be 'on' or 'off'")
+    }
+    cfg.Bitrot = bitrot == config.EnableOn
+    return cfg, nil
+}
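For context, the new crawler subsystem is consumed like any other config subsystem: build a KVS, then call LookupConfig. A minimal usage sketch (hypothetical wiring, not part of the diff):

package main

import (
	"fmt"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/crawler"
)

func main() {
	// Start from the subsystem defaults and flip bitrot scanning on.
	kvs := crawler.DefaultKVS
	kvs.Set(crawler.BitrotScan, config.EnableOn)

	cfg, err := crawler.LookupConfig(kvs)
	if err != nil {
		fmt.Println("invalid crawler config:", err)
		return
	}
	fmt.Println("bitrot scan enabled:", cfg.Bitrot)
}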
@@ -26,9 +26,8 @@ import (
     "strings"
     "time"

-    "github.com/minio/minio-go/v7/pkg/set"
-
     "github.com/coredns/coredns/plugin/etcd/msg"
+    "github.com/minio/minio-go/v7/pkg/set"
     "go.etcd.io/etcd/v3/clientv3"
 )

@@ -214,6 +213,11 @@ func (c *CoreDNS) DeleteRecord(record SrvRecord) error {
     return nil
 }

+// String stringer name for this implementation of dns.Store
+func (c *CoreDNS) String() string {
+    return "etcdDNS"
+}
+
 // CoreDNS - represents dns config for coredns server.
 type CoreDNS struct {
     domainNames []string
@@ -223,13 +227,13 @@ type CoreDNS struct {
     etcdClient *clientv3.Client
 }

-// Option - functional options pattern style
-type Option func(*CoreDNS)
+// EtcdOption - functional options pattern style
+type EtcdOption func(*CoreDNS)

 // DomainNames set a list of domain names used by this CoreDNS
 // client setting, note this will fail if set to empty when
 // constructor initializes.
-func DomainNames(domainNames []string) Option {
+func DomainNames(domainNames []string) EtcdOption {
     return func(args *CoreDNS) {
         args.domainNames = domainNames
     }
@@ -237,14 +241,14 @@ func DomainNames(domainNames []string) Option {

 // DomainIPs set a list of custom domain IPs, note this will
 // fail if set to empty when constructor initializes.
-func DomainIPs(domainIPs set.StringSet) Option {
+func DomainIPs(domainIPs set.StringSet) EtcdOption {
     return func(args *CoreDNS) {
         args.domainIPs = domainIPs
     }
 }

 // DomainPort - is a string version of server port
-func DomainPort(domainPort string) Option {
+func DomainPort(domainPort string) EtcdOption {
     return func(args *CoreDNS) {
         args.domainPort = domainPort
     }
@@ -253,14 +257,14 @@ func DomainPort(domainPort string) Option {
 // CoreDNSPath - custom prefix on etcd to populate DNS
 // service records, optional and can be empty.
 // if empty then c.prefixPath is used i.e "/skydns"
-func CoreDNSPath(prefix string) Option {
+func CoreDNSPath(prefix string) EtcdOption {
     return func(args *CoreDNS) {
         args.prefixPath = prefix
     }
 }

 // NewCoreDNS - initialize a new coreDNS set/unset values.
-func NewCoreDNS(cfg clientv3.Config, setters ...Option) (Store, error) {
+func NewCoreDNS(cfg clientv3.Config, setters ...EtcdOption) (Store, error) {
     etcdClient, err := clientv3.New(cfg)
     if err != nil {
         return nil, err
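The rename from Option to EtcdOption frees the plain name for the operator DNS options added in the new file below; the functional-options pattern itself is unchanged. A self-contained sketch of the pattern (illustrative only, with a made-up Server type):

package main

import "fmt"

// Server is a stand-in for CoreDNS/OperatorDNS.
type Server struct {
	domainNames []string
	domainPort  string
}

// EtcdOption mirrors the renamed functional option type.
type EtcdOption func(*Server)

// DomainNames sets the served domain names.
func DomainNames(names []string) EtcdOption {
	return func(s *Server) { s.domainNames = names }
}

// DomainPort sets the server port as a string.
func DomainPort(port string) EtcdOption {
	return func(s *Server) { s.domainPort = port }
}

// NewServer applies each option to a zero-value Server.
func NewServer(setters ...EtcdOption) *Server {
	s := &Server{}
	for _, set := range setters {
		set(s)
	}
	return s
}

func main() {
	s := NewServer(DomainNames([]string{"example.com"}), DomainPort("9000"))
	fmt.Println(s.domainNames, s.domainPort)
}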
cmd/config/dns/operator_dns.go (new file, +221 lines)
@@ -0,0 +1,221 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dns
+
+import (
+    "context"
+    "crypto/tls"
+    "crypto/x509"
+    "errors"
+    "fmt"
+    "net"
+    "net/http"
+    "net/url"
+    "strconv"
+    "time"
+
+    "github.com/dgrijalva/jwt-go"
+    "github.com/minio/minio/cmd/config"
+    xhttp "github.com/minio/minio/cmd/http"
+)
+
+var (
+    defaultOperatorContextTimeout = 10 * time.Second
+    // ErrNotImplemented - Indicates the functionality which is not implemented
+    ErrNotImplemented = errors.New("The method is not implemented")
+)
+
+func (c *OperatorDNS) addAuthHeader(r *http.Request) error {
+    if c.username == "" || c.password == "" {
+        return nil
+    }
+
+    claims := &jwt.StandardClaims{
+        ExpiresAt: int64(15 * time.Minute),
+        Issuer:    c.username,
+        Subject:   config.EnvDNSWebhook,
+    }
+
+    token := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)
+    ss, err := token.SignedString([]byte(c.password))
+    if err != nil {
+        return err
+    }
+
+    r.Header.Set("Authorization", "Bearer "+ss)
+    return nil
+}
+
+func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) {
+    u, err := url.Parse(c.Endpoint)
+    if err != nil {
+        return "", err
+    }
+    q := u.Query()
+    q.Add("bucket", bucket)
+    q.Add("delete", strconv.FormatBool(delete))
+    u.RawQuery = q.Encode()
+    return u.String(), nil
+}
+
+// Put - Adds DNS entries into operator webhook server
+func (c *OperatorDNS) Put(bucket string) error {
+    ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
+    defer cancel()
+    e, err := c.endpoint(bucket, false)
+    if err != nil {
+        return err
+    }
+    req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
+    if err != nil {
+        return err
+    }
+    if err = c.addAuthHeader(req); err != nil {
+        return err
+    }
+    resp, err := c.httpClient.Do(req)
+    if err != nil {
+        if derr := c.Delete(bucket); derr != nil {
+            return derr
+        }
+    }
+    xhttp.DrainBody(resp.Body)
+    if resp.StatusCode != http.StatusOK {
+        return fmt.Errorf("request to create the service for bucket %s, failed with status %s", bucket, resp.Status)
+    }
+    return nil
+}
+
+// Delete - Removes DNS entries added in Put().
+func (c *OperatorDNS) Delete(bucket string) error {
+    ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
+    defer cancel()
+    e, err := c.endpoint(bucket, true)
+    if err != nil {
+        return err
+    }
+    req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
+    if err != nil {
+        return err
+    }
+    if err = c.addAuthHeader(req); err != nil {
+        return err
+    }
+    resp, err := c.httpClient.Do(req)
+    if err != nil {
+        return err
+    }
+    xhttp.DrainBody(resp.Body)
+    if resp.StatusCode != http.StatusOK {
+        return fmt.Errorf("request to delete the service for bucket %s, failed with status %s", bucket, resp.Status)
+    }
+    return nil
+}
+
+// DeleteRecord - Removes a specific DNS entry
+// No Op for Operator because operator deals on with bucket entries
+func (c *OperatorDNS) DeleteRecord(record SrvRecord) error {
+    return ErrNotImplemented
+}
+
+// Close closes the internal http client
+func (c *OperatorDNS) Close() error {
+    c.httpClient.CloseIdleConnections()
+    return nil
+}
+
+// List - Retrieves list of DNS entries for the domain.
+// This is a No Op for Operator because, there is no intent to enforce global
+// namespace at MinIO level with this DNS entry. The global namespace in
+// enforced by the Kubernetes Operator
+func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {
+    return nil, ErrNotImplemented
+}
+
+// Get - Retrieves DNS records for a bucket.
+// This is a No Op for Operator because, there is no intent to enforce global
+// namespace at MinIO level with this DNS entry. The global namespace in
+// enforced by the Kubernetes Operator
+func (c *OperatorDNS) Get(bucket string) (srvRecords []SrvRecord, err error) {
+    return nil, ErrNotImplemented
+}
+
+// String stringer name for this implementation of dns.Store
+func (c *OperatorDNS) String() string {
+    return "webhookDNS"
+}
+
+// OperatorDNS - represents dns config for MinIO k8s operator.
+type OperatorDNS struct {
+    httpClient *http.Client
+    Endpoint   string
+    rootCAs    *x509.CertPool
+    username   string
+    password   string
+}
+
+// OperatorOption - functional options pattern style for OperatorDNS
+type OperatorOption func(*OperatorDNS)
+
+// Authentication - custom username and password for authenticating at the endpoint
+func Authentication(username, password string) OperatorOption {
+    return func(args *OperatorDNS) {
+        args.username = username
+        args.password = password
+    }
+}
+
+// RootCAs - add custom trust certs pool
+func RootCAs(CAs *x509.CertPool) OperatorOption {
+    return func(args *OperatorDNS) {
+        args.rootCAs = CAs
+    }
+}
+
+// NewOperatorDNS - initialize a new K8S Operator DNS set/unset values.
+func NewOperatorDNS(endpoint string, setters ...OperatorOption) (Store, error) {
+    if endpoint == "" {
+        return nil, errors.New("invalid argument")
+    }
+
+    args := &OperatorDNS{
+        Endpoint: endpoint,
+    }
+    for _, setter := range setters {
+        setter(args)
+    }
+    args.httpClient = &http.Client{
+        Transport: &http.Transport{
+            Proxy: http.ProxyFromEnvironment,
+            DialContext: (&net.Dialer{
+                Timeout:   3 * time.Second,
+                KeepAlive: 5 * time.Second,
+            }).DialContext,
+            ResponseHeaderTimeout: 3 * time.Second,
+            TLSHandshakeTimeout:   3 * time.Second,
+            ExpectContinueTimeout: 3 * time.Second,
+            TLSClientConfig: &tls.Config{
+                RootCAs: args.rootCAs,
+            },
+            // Go net/http automatically unzip if content-type is
+            // gzip disable this feature, as we are always interested
+            // in raw stream.
+            DisableCompression: true,
+        },
+    }
+    return args, nil
+}
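Two notes on the new store. First, `ExpiresAt: int64(15 * time.Minute)` assigns a raw nanosecond count where jwt-go expects a Unix timestamp, so the intended 15-minute expiry is not what the claim actually encodes. Second, usage is straightforward; a sketch of how the webhook store might be wired up (hypothetical endpoint and credentials, not part of the diff):

package main

import (
	"fmt"

	"github.com/minio/minio/cmd/config/dns"
)

func main() {
	// Hypothetical operator webhook endpoint and credentials.
	store, err := dns.NewOperatorDNS("https://operator.example.local:4222/webhook",
		dns.Authentication("minio", "secret"))
	if err != nil {
		fmt.Println("init failed:", err)
		return
	}
	defer store.Close()

	// Registers the bucket with the operator; Delete undoes it.
	if err := store.Put("mybucket"); err != nil {
		fmt.Println("put failed:", err)
	}
}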
@@ -24,4 +24,5 @@ type Store interface {
     List() (map[string][]SrvRecord, error)
     DeleteRecord(record SrvRecord) error
     Close() error
+    String() string
 }

@@ -187,12 +187,6 @@ Example 1:
         "",
     )

-    ErrCorruptedBackend = newErrFn(
-        "Unable to use the specified backend, pre-existing content detected",
-        "Please ensure your disk mount does not have any pre-existing content",
-        "",
-    )
-
     ErrUnableToWriteInBackend = newErrFn(
         "Unable to write to the backend",
         "Please ensure MinIO binary has write permissions for the backend",
@@ -25,6 +25,7 @@ import (
     "net/http"
     "strconv"
     "strings"
+    "sync"
     "time"

     jwtgo "github.com/dgrijalva/jwt-go"
@@ -49,10 +50,14 @@ type Config struct {
     publicKeys  map[string]crypto.PublicKey
     transport   *http.Transport
     closeRespFn func(io.ReadCloser)
+    mutex       *sync.Mutex
 }

 // PopulatePublicKey - populates a new publickey from the JWKS URL.
 func (r *Config) PopulatePublicKey() error {
+    r.mutex.Lock()
+    defer r.mutex.Unlock()
+
     if r.JWKS.URL == nil || r.JWKS.URL.String() == "" {
         return nil
     }
@@ -185,7 +190,15 @@ func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
     var claims jwtgo.MapClaims
     jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback)
     if err != nil {
-        return nil, err
+        // Re-populate the public key in-case the JWKS
+        // pubkeys are refreshed
+        if err = p.PopulatePublicKey(); err != nil {
+            return nil, err
+        }
+        jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback)
+        if err != nil {
+            return nil, err
+        }
     }

     if !jwtToken.Valid {
@@ -317,6 +330,7 @@ func LookupConfig(kvs config.KVS, transport *http.Transport, closeRespFn func(io
         ClientID:    env.Get(EnvIdentityOpenIDClientID, kvs.Get(ClientID)),
         transport:   transport,
         closeRespFn: closeRespFn,
+        mutex:       &sync.Mutex{}, // allocate for copying
     }

     configURL := env.Get(EnvIdentityOpenIDURL, kvs.Get(ConfigURL))
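The Validate change above implements a common JWKS pattern: if token parsing fails (for example because the identity provider rotated its signing keys), refresh the cached public keys once and parse again. A distilled sketch of that control flow (illustrative, with stand-in functions):

package main

import (
	"errors"
	"fmt"
)

var keysStale = true

// parseToken is a stand-in for jwtgo.ParseWithClaims.
func parseToken(token string) error {
	if keysStale {
		return errors.New("signature invalid: unknown key")
	}
	return nil
}

// refreshKeys is a stand-in for Config.PopulatePublicKey.
func refreshKeys() error {
	keysStale = false
	return nil
}

// validate retries exactly once after refreshing the key set.
func validate(token string) error {
	if err := parseToken(token); err != nil {
		if err = refreshKeys(); err != nil {
			return err
		}
		return parseToken(token)
	}
	return nil
}

func main() {
	fmt.Println("valid:", validate("header.payload.sig") == nil)
}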
@@ -20,6 +20,7 @@ import (
     "crypto"
     "encoding/json"
     "net/url"
+    "sync"
     "testing"
     "time"

@@ -89,6 +90,7 @@ func TestJWTAzureFail(t *testing.T) {
     }

     cfg := Config{}
+    cfg.mutex = &sync.Mutex{}
     cfg.JWKS.URL = u1
     cfg.publicKeys = keys
     jwt := NewJWT(cfg)
@@ -136,6 +138,7 @@ func TestJWT(t *testing.T) {
     }

     cfg := Config{}
+    cfg.mutex = &sync.Mutex{}
     cfg.JWKS.URL = u1
     cfg.publicKeys = keys
     jwt := NewJWT(cfg)
cmd/config/logger.go (new file, +27 lines)
@@ -0,0 +1,27 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import "context"
+
+// Logger contains injected logger methods.
+var Logger = struct {
+    Info  func(msg string, data ...interface{})
+    LogIf func(ctx context.Context, err error, errKind ...interface{})
+}{
+    // Initialized via injection.
+}
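This indirection lets the low-level config package log (as Config.Merge now does) without importing cmd/logger, which would create an import cycle. A sketch of how the hooks might be injected at startup (hypothetical wiring, not shown in this diff):

package main

import (
	"context"
	"log"

	"github.com/minio/minio/cmd/config"
)

func main() {
	// Inject concrete implementations into the config package.
	config.Logger.Info = func(msg string, data ...interface{}) {
		log.Printf(msg, data...)
	}
	config.Logger.LogIf = func(ctx context.Context, err error, errKind ...interface{}) {
		if err != nil {
			log.Println(err)
		}
	}

	config.Logger.Info("config: logger injected %s\n", "ok")
}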
@@ -559,6 +559,18 @@ var (
             Optional:    true,
             Type:        "number",
         },
+        config.HelpKV{
+            Key:         target.ElasticUsername,
+            Description: "username for Elasticsearch basic-auth",
+            Optional:    true,
+            Type:        "string",
+        },
+        config.HelpKV{
+            Key:         target.ElasticPassword,
+            Description: "password for Elasticsearch basic-auth",
+            Optional:    true,
+            Type:        "string",
+        },
         config.HelpKV{
             Key:         config.Comment,
             Description: config.DefaultComment,

@@ -201,6 +201,14 @@ func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) e
             Key:   target.ElasticQueueLimit,
             Value: strconv.Itoa(int(cfg.QueueLimit)),
         },
+        config.KV{
+            Key:   target.ElasticUsername,
+            Value: cfg.Username,
+        },
+        config.KV{
+            Key:   target.ElasticPassword,
+            Value: cfg.Password,
+        },
     }

     return nil
@@ -331,7 +339,7 @@ func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArg
         },
         config.KV{
             Key:   target.PostgresUsername,
-            Value: cfg.User,
+            Value: cfg.Username,
         },
         config.KV{
             Key: target.PostgresPassword,
@@ -43,11 +43,10 @@ var ErrTargetsOffline = errors.New("one or more targets are offline. Please use

 // TestNotificationTargets is similar to GetNotificationTargets()
 // avoids explicit registration.
-func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport,
-    targetIDs []event.TargetID) error {
+func TestNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, targetIDs []event.TargetID) error {
     test := true
     returnOnTargetError := true
-    targets, err := RegisterNotificationTargets(cfg, doneCh, transport, targetIDs, test, returnOnTargetError)
+    targets, err := RegisterNotificationTargets(ctx, cfg, transport, targetIDs, test, returnOnTargetError)
     if err == nil {
         // Close all targets since we are only testing connections.
         for _, t := range targets.TargetMap() {
@@ -60,9 +59,9 @@ func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transpor

 // GetNotificationTargets registers and initializes all notification
 // targets, returns error if any.
-func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool) (*event.TargetList, error) {
+func GetNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, test bool) (*event.TargetList, error) {
     returnOnTargetError := false
-    return RegisterNotificationTargets(cfg, doneCh, transport, nil, test, returnOnTargetError)
+    return RegisterNotificationTargets(ctx, cfg, transport, nil, test, returnOnTargetError)
 }

 // RegisterNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
@@ -70,8 +69,8 @@ func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport
 // * Add a new target in pkg/event/target package.
 // * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
 // * Handle the configuration in this function to create/add into TargetList.
-func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
-    targetList, err := FetchRegisteredTargets(cfg, doneCh, transport, test, returnOnTargetError)
+func RegisterNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
+    targetList, err := FetchRegisteredTargets(ctx, cfg, transport, test, returnOnTargetError)
     if err != nil {
         return targetList, err
     }
@@ -94,7 +93,7 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
 // FetchRegisteredTargets - Returns a set of configured TargetList
 // If `returnOnTargetError` is set to true, The function returns when a target initialization fails
 // Else, the function will return a complete TargetList irrespective of errors
-func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
+func FetchRegisteredTargets(ctx context.Context, cfg config.Config, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
     targetList := event.NewTargetList()
     var targetsOffline bool

@@ -118,7 +117,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         return nil, err
     }

-    esTargets, err := GetNotifyES(cfg[config.NotifyESSubSys])
+    esTargets, err := GetNotifyES(cfg[config.NotifyESSubSys], transport)
     if err != nil {
         return nil, err
     }
@@ -167,7 +166,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewAMQPTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewAMQPTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -188,7 +187,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewElasticsearchTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewElasticsearchTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -209,7 +208,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
             continue
         }
         args.TLS.RootCAs = transport.TLSClientConfig.RootCAs
-        newTarget, err := target.NewKafkaTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewKafkaTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -230,7 +229,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
             continue
         }
         args.RootCAs = transport.TLSClientConfig.RootCAs
-        newTarget, err := target.NewMQTTTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewMQTTTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -250,7 +249,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewMySQLTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewMySQLTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -270,7 +269,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewNATSTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewNATSTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -290,7 +289,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewNSQTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewNSQTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -310,7 +309,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewPostgreSQLTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewPostgreSQLTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -330,7 +329,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewRedisTarget(id, args, doneCh, logger.LogOnceIf, test)
+        newTarget, err := target.NewRedisTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
@@ -350,7 +349,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
         if !args.Enable {
             continue
         }
-        newTarget, err := target.NewWebhookTarget(id, args, doneCh, logger.LogOnceIf, transport, test)
+        newTarget, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport, test)
         if err != nil {
             targetsOffline = true
             if returnOnTargetError {
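Threading a context.Context instead of a bare doneCh lets callers cancel target registration with the standard context machinery; ctx.Done() still yields the <-chan struct{} the target constructors expect. A small sketch of that equivalence:

package main

import (
	"context"
	"fmt"
	"time"
)

// worker is a stand-in for a notification target loop that
// previously took a doneCh <-chan struct{} directly.
func worker(doneCh <-chan struct{}) {
	select {
	case <-doneCh:
		fmt.Println("worker stopped")
	case <-time.After(time.Second):
		fmt.Println("worker timed out")
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go worker(ctx.Done()) // ctx.Done() is the old doneCh
	cancel()              // cancel replaces close(doneCh)
	time.Sleep(100 * time.Millisecond)
}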
@@ -1532,11 +1531,19 @@ var (
             Key:   target.ElasticQueueLimit,
             Value: "0",
         },
+        config.KV{
+            Key:   target.ElasticUsername,
+            Value: "",
+        },
+        config.KV{
+            Key:   target.ElasticPassword,
+            Value: "",
+        },
     }
 )

 // GetNotifyES - returns a map of registered notification 'elasticsearch' targets
-func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchArgs, error) {
+func GetNotifyES(esKVS map[string]config.KVS, transport *http.Transport) (map[string]target.ElasticsearchArgs, error) {
     esTargets := make(map[string]target.ElasticsearchArgs)
     for k, kv := range mergeTargets(esKVS, target.EnvElasticEnable, DefaultESKVS) {
         enableEnv := target.EnvElasticEnable
@@ -1586,6 +1593,16 @@ func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchAr
             queueDirEnv = queueDirEnv + config.Default + k
         }

+        usernameEnv := target.EnvElasticUsername
+        if k != config.Default {
+            usernameEnv = usernameEnv + config.Default + k
+        }
+
+        passwordEnv := target.EnvElasticPassword
+        if k != config.Default {
+            passwordEnv = passwordEnv + config.Default + k
+        }
+
         esArgs := target.ElasticsearchArgs{
             Enable: enabled,
             Format: env.Get(formatEnv, kv.Get(target.ElasticFormat)),
@@ -1593,6 +1610,9 @@ func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchAr
             Index:      env.Get(indexEnv, kv.Get(target.ElasticIndex)),
             QueueDir:   env.Get(queueDirEnv, kv.Get(target.ElasticQueueDir)),
             QueueLimit: uint64(queueLimit),
+            Transport:  transport,
+            Username:   env.Get(usernameEnv, kv.Get(target.ElasticUsername)),
+            Password:   env.Get(passwordEnv, kv.Get(target.ElasticPassword)),
         }
         if err = esArgs.Validate(); err != nil {
             return nil, err
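Per-target environment variables are derived by appending the target name to the base variable, separated by config.Default. A sketch of that composition — the literal base name MINIO_NOTIFY_ELASTICSEARCH_USERNAME and the "_" separator are assumptions based on MinIO's usual conventions, not shown in this diff:

package main

import "fmt"

const (
	envElasticUsername = "MINIO_NOTIFY_ELASTICSEARCH_USERNAME" // assumed base name
	defaultSep         = "_"                                   // stand-in for config.Default
	defaultTarget      = "_"                                   // stand-in for the default target key
)

// envFor composes the per-target variable the way GetNotifyES does.
func envFor(base, target string) string {
	if target == defaultTarget {
		return base
	}
	return base + defaultSep + target
}

func main() {
	fmt.Println(envFor(envElasticUsername, defaultTarget)) // base variable for the default target
	fmt.Println(envFor(envElasticUsername, "myes"))        // ..._USERNAME_myes for a named target
}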
@@ -156,7 +156,7 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
 }

 // Validates the parity disks.
-func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
+func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
     if ssParity == 0 && rrsParity == 0 {
         return nil
     }
@@ -174,12 +174,12 @@ func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
         return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
     }

-    if ssParity > drivesPerSet/2 {
-        return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2)
+    if ssParity > setDriveCount/2 {
+        return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
     }

-    if rrsParity > drivesPerSet/2 {
-        return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2)
+    if rrsParity > setDriveCount/2 {
+        return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
     }

     if ssParity > 0 && rrsParity > 0 {
@@ -220,13 +220,13 @@ func Enabled(kvs config.KVS) bool {
 }

 // LookupConfig - lookup storage class config and override with valid environment settings if any.
-func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
+func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
     cfg = Config{}
-    cfg.Standard.Parity = drivesPerSet / 2
+    cfg.Standard.Parity = setDriveCount / 2
     cfg.RRS.Parity = defaultRRSParity

     if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
-        return cfg, err
+        return Config{}, err
     }

     ssc := env.Get(StandardEnv, kvs.Get(ClassStandard))
@@ -235,17 +235,17 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
     if ssc != "" {
         cfg.Standard, err = parseStorageClass(ssc)
         if err != nil {
-            return cfg, err
+            return Config{}, err
         }
     }
     if cfg.Standard.Parity == 0 {
-        cfg.Standard.Parity = drivesPerSet / 2
+        cfg.Standard.Parity = setDriveCount / 2
     }

     if rrsc != "" {
         cfg.RRS, err = parseStorageClass(rrsc)
         if err != nil {
-            return cfg, err
+            return Config{}, err
         }
     }
     if cfg.RRS.Parity == 0 {
@@ -254,8 +254,8 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {

     // Validation is done after parsing both the storage classes. This is needed because we need one
     // storage class value to deduce the correct value of the other storage class.
-    if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil {
-        return cfg, err
+    if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
+        return Config{}, err
     }

     return cfg, nil

@@ -69,10 +69,10 @@ func TestParseStorageClass(t *testing.T) {

 func TestValidateParity(t *testing.T) {
     tests := []struct {
-        rrsParity    int
-        ssParity     int
-        success      bool
-        drivesPerSet int
+        rrsParity     int
+        ssParity      int
+        success       bool
+        setDriveCount int
     }{
         {2, 4, true, 16},
         {3, 3, true, 16},
@@ -85,7 +85,7 @@ func TestValidateParity(t *testing.T) {
         {9, 2, false, 16},
     }
     for i, tt := range tests {
-        err := validateParity(tt.ssParity, tt.rrsParity, tt.drivesPerSet)
+        err := validateParity(tt.ssParity, tt.rrsParity, tt.setDriveCount)
         if err != nil && tt.success {
             t.Errorf("Test %d, Expected success, got %s", i+1, err)
         }
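The parity bounds above mean that with a 16-drive erasure set neither storage class may use more than 16/2 = 8 parity drives, which is why the {9, 2, false, 16} case in the test table is expected to fail. A tiny self-contained check of that rule:

package main

import "fmt"

// maxParity mirrors the setDriveCount/2 ceiling enforced by validateParity.
func maxParity(setDriveCount int) int {
	return setDriveCount / 2
}

func main() {
	setDriveCount := 16
	for _, parity := range []int{2, 8, 9} {
		ok := parity <= maxParity(setDriveCount)
		fmt.Printf("parity %d on %d drives allowed: %v\n", parity, setDriveCount, ok)
	}
}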
@@ -117,6 +117,29 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh <-chan
     sys.pubsub.Subscribe(subCh, doneCh, filter)
 }

+// Validate if HTTPConsoleLoggerSys is valid, always returns nil right now
+func (sys *HTTPConsoleLoggerSys) Validate() error {
+    return nil
+}
+
+// Content returns the console stdout log
+func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
+    sys.RLock()
+    sys.logBuf.Do(func(p interface{}) {
+        if p != nil {
+            lg, ok := p.(log.Info)
+            if ok {
+                if (lg.Entry != log.Entry{}) {
+                    logs = append(logs, lg.Entry)
+                }
+            }
+        }
+    })
+    sys.RUnlock()
+
+    return
+}
+
 // Send log message 'e' to console and publish to console
 // log pubsub system
 func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
@@ -16,11 +16,14 @@ package crypto

 import (
     "errors"
+    "math/rand"
     "net/http"
     "reflect"
     "strconv"
+    "strings"

     "github.com/minio/minio/cmd/config"
+    "github.com/minio/minio/pkg/ellipses"
     "github.com/minio/minio/pkg/env"
     xnet "github.com/minio/minio/pkg/net"
 )
@@ -167,7 +170,8 @@ const (

 const (
     // EnvKMSKesEndpoint is the environment variable used to specify
-    // the kes server HTTPS endpoint.
+    // one or multiple KES server HTTPS endpoints. The individual
+    // endpoints should be separated by ','.
     EnvKMSKesEndpoint = "MINIO_KMS_KES_ENDPOINT"

     // EnvKMSKesKeyFile is the environment variable used to specify
@@ -216,16 +220,36 @@ func LookupKesConfig(kvs config.KVS) (KesConfig, error) {
     kesCfg := KesConfig{}

     endpointStr := env.Get(EnvKMSKesEndpoint, kvs.Get(KMSKesEndpoint))
-    if endpointStr != "" {
-        // Lookup kes configuration & overwrite config entry if ENV var is present
-        endpoint, err := xnet.ParseHTTPURL(endpointStr)
+    var endpoints []string
+    for _, endpoint := range strings.Split(endpointStr, ",") {
+        if strings.TrimSpace(endpoint) == "" {
+            continue
+        }
+        if !ellipses.HasEllipses(endpoint) {
+            endpoints = append(endpoints, endpoint)
+            continue
+        }
+        pattern, err := ellipses.FindEllipsesPatterns(endpoint)
         if err != nil {
             return kesCfg, err
         }
-        endpointStr = endpoint.String()
+        for _, p := range pattern {
+            endpoints = append(endpoints, p.Expand()...)
+        }
+    }
+    if len(endpoints) == 0 {
+        return kesCfg, nil
+    }

-    kesCfg.Endpoint = endpointStr
+    randNum := rand.Intn(len(endpoints) + 1) // We add 1 b/c len(endpoints) may be 0: See: rand.Intn docs
+    kesCfg.Endpoint = make([]string, len(endpoints))
+    for i, endpoint := range endpoints {
+        endpoint, err := xnet.ParseHTTPURL(endpoint)
+        if err != nil {
+            return kesCfg, err
+        }
+        kesCfg.Endpoint[(randNum+i)%len(endpoints)] = endpoint.String()
+    }
     kesCfg.KeyFile = env.Get(EnvKMSKesKeyFile, kvs.Get(KMSKesKeyFile))
     kesCfg.CertFile = env.Get(EnvKMSKesCertFile, kvs.Get(KMSKesCertFile))
     kesCfg.CAPath = env.Get(EnvKMSKesCAPath, kvs.Get(KMSKesCAPath))
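With this change MINIO_KMS_KES_ENDPOINT accepts a comma-separated list, and each element may use MinIO's ellipses syntax; the parsed endpoints are then stored starting at a random offset so that different servers spread their load across KES instances. A sketch of the expansion step (the {1...3} pattern syntax follows pkg/ellipses; the hostnames are made up):

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/ellipses"
)

func main() {
	arg := "https://kes{1...3}.example.net:7373" // hypothetical endpoints
	if !ellipses.HasEllipses(arg) {
		fmt.Println(arg)
		return
	}
	patterns, err := ellipses.FindEllipsesPatterns(arg)
	if err != nil {
		panic(err)
	}
	for _, p := range patterns {
		// Expand yields every concrete endpoint, e.g. https://kes1.example.net:7373
		for _, endpoint := range p.Expand() {
			fmt.Println(endpoint)
		}
	}
}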
@@ -18,9 +18,11 @@ import (
     "bytes"
     "crypto/md5"
     "encoding/base64"
     "encoding/json"
     "net/http"
     "strings"

+    jsoniter "github.com/json-iterator/go"
+    xhttp "github.com/minio/minio/cmd/http"
 )

 // SSEHeader is the general AWS SSE HTTP header key.
@@ -81,6 +83,8 @@ const (
 func RemoveSensitiveHeaders(h http.Header) {
     h.Del(SSECKey)
     h.Del(SSECopyKey)
+    h.Del(xhttp.AmzMetaUnencryptedContentLength)
+    h.Del(xhttp.AmzMetaUnencryptedContentMD5)
 }

 // IsRequested returns true if the HTTP headers indicates
@@ -144,6 +148,7 @@ func (s3KMS) ParseHTTP(h http.Header) (string, interface{}, error) {
     contextStr, ok := h[SSEKmsContext]
     if ok {
         var context map[string]interface{}
+        var json = jsoniter.ConfigCompatibleWithStandardLibrary
         if err := json.Unmarshal([]byte(contextStr[0]), &context); err != nil {
             return "", nil, err
         }

@@ -457,6 +457,16 @@ var removeSensitiveHeadersTests = []struct {
             "X-Amz-Meta-Test-1": []string{"Test-1"},
         },
     },
+    { // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
+        Header: http.Header{
+            "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5":    []string{"value"},
+            "X-Amz-Meta-X-Amz-Unencrypted-Content-Length": []string{"value"},
+            "X-Amz-Meta-Test-1":                           []string{"Test-1"},
+        },
+        ExpectedHeader: http.Header{
+            "X-Amz-Meta-Test-1": []string{"Test-1"},
+        },
+    },
 }

 func TestRemoveSensitiveHeaders(t *testing.T) {
@@ -16,9 +16,9 @@ package crypto

 import (
     "bytes"
+    "context"
     "crypto/tls"
     "crypto/x509"
-    "encoding/json"
     "errors"
     "fmt"
     "io"
@@ -30,10 +30,13 @@ import (
     "strings"
     "time"

+    jsoniter "github.com/json-iterator/go"
     xhttp "github.com/minio/minio/cmd/http"
     xnet "github.com/minio/minio/pkg/net"
 )

+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
 // ErrKESKeyExists is the error returned a KES server
 // when a master key does exist.
 var ErrKESKeyExists = NewKESError(http.StatusBadRequest, "key does already exist")
@@ -43,8 +46,8 @@ var ErrKESKeyExists = NewKESError(http.StatusBadRequest, "key does already exist
 type KesConfig struct {
     Enabled bool

-    // The kes server endpoint.
-    Endpoint string
+    // The KES server endpoints.
+    Endpoint []string

     // The path to the TLS private key used
     // by MinIO to authenticate to the kes
@@ -83,7 +86,7 @@ type KesConfig struct {
 // Verify verifies if the kes configuration is correct
 func (k KesConfig) Verify() (err error) {
     switch {
-    case k.Endpoint == "":
+    case len(k.Endpoint) == 0:
         err = Errorf("crypto: missing kes endpoint")
     case k.CertFile == "":
         err = Errorf("crypto: missing cert file")
@@ -98,7 +101,7 @@ func (k KesConfig) Verify() (err error) {
 type kesService struct {
     client *kesClient

-    endpoint     string
+    endpoints    []string
     defaultKeyID string
 }

@@ -113,23 +116,37 @@ func NewKes(cfg KesConfig) (KMS, error) {
     if err != nil {
         return nil, err
     }
-    certPool, err := loadCACertificates(cfg.CAPath)
-    if err != nil {
-        return nil, err
+    if cfg.Transport.TLSClientConfig != nil {
+        if err = loadCACertificates(cfg.CAPath,
+            cfg.Transport.TLSClientConfig.RootCAs); err != nil {
+            return nil, err
+        }
+    } else {
+        rootCAs, _ := x509.SystemCertPool()
+        if rootCAs == nil {
+            // In some systems (like Windows) system cert pool is
+            // not supported or no certificates are present on the
+            // system - so we create a new cert pool.
+            rootCAs = x509.NewCertPool()
+        }
+        if err = loadCACertificates(cfg.CAPath, rootCAs); err != nil {
+            return nil, err
+        }
+        cfg.Transport.TLSClientConfig = &tls.Config{
+            RootCAs: rootCAs,
+        }
     }
-    cfg.Transport.TLSClientConfig = &tls.Config{
-        Certificates: []tls.Certificate{cert},
-        RootCAs:      certPool,
-    }
-    cfg.Transport.ForceAttemptHTTP2 = true
+    cfg.Transport.TLSClientConfig.Certificates = []tls.Certificate{cert}
+    cfg.Transport.TLSClientConfig.NextProtos = []string{"h2"}

     return &kesService{
         client: &kesClient{
-            addr: cfg.Endpoint,
+            endpoints: cfg.Endpoint,
             httpClient: http.Client{
                 Transport: cfg.Transport,
             },
         },
-        endpoint:     cfg.Endpoint,
+        endpoints:    cfg.Endpoint,
         defaultKeyID: cfg.DefaultKeyID,
     }, nil
 }
@@ -146,9 +163,9 @@ func (kes *kesService) DefaultKeyID() string {
 // method.
 func (kes *kesService) Info() KMSInfo {
     return KMSInfo{
-        Endpoint: kes.endpoint,
-        Name:     kes.DefaultKeyID(),
-        AuthType: "TLS",
+        Endpoints: kes.endpoints,
+        Name:      kes.DefaultKeyID(),
+        AuthType:  "TLS",
     }
 }

@@ -204,7 +221,7 @@ func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
 // • GenerateDataKey (API: /v1/key/generate/)
 // • DecryptDataKey (API: /v1/key/decrypt/)
 type kesClient struct {
-    addr       string
+    endpoints  []string
     httpClient http.Client
 }

@@ -215,8 +232,8 @@ type kesClient struct {
 // application does not have the cryptographic key at
 // any point in time.
 func (c *kesClient) CreateKey(name string) error {
-    url := fmt.Sprintf("%s/v1/key/create/%s", c.addr, url.PathEscape(name))
-    _, err := c.postRetry(url, nil, 0) // No request body and no response expected
+    path := fmt.Sprintf("/v1/key/create/%s", url.PathEscape(name))
+    _, err := c.postRetry(path, nil, 0) // No request body and no response expected
     if err != nil {
         return err
     }
@@ -248,8 +265,8 @@ func (c *kesClient) GenerateDataKey(name string, context []byte) ([]byte, []byte
     }

     const limit = 1 << 20 // A plaintext/ciphertext key pair will never be larger than 1 MB
-    url := fmt.Sprintf("%s/v1/key/generate/%s", c.addr, url.PathEscape(name))
-    resp, err := c.postRetry(url, bytes.NewReader(body), limit)
+    path := fmt.Sprintf("/v1/key/generate/%s", url.PathEscape(name))
+    resp, err := c.postRetry(path, bytes.NewReader(body), limit)
     if err != nil {
         return nil, nil, err
     }
@@ -285,8 +302,8 @@ func (c *kesClient) DecryptDataKey(name string, ciphertext, context []byte) ([]b
     }

     const limit = 1 << 20 // A data key will never be larger than 1 MiB
-    url := fmt.Sprintf("%s/v1/key/decrypt/%s", c.addr, url.PathEscape(name))
-    resp, err := c.postRetry(url, bytes.NewReader(body), limit)
+    path := fmt.Sprintf("/v1/key/decrypt/%s", url.PathEscape(name))
+    resp, err := c.postRetry(path, bytes.NewReader(body), limit)
     if err != nil {
         return nil, err
     }
@@ -357,7 +374,16 @@ func parseErrorResponse(resp *http.Response) error {
 }

 func (c *kesClient) post(url string, body io.Reader, limit int64) (io.Reader, error) {
-    resp, err := c.httpClient.Post(url, "application/json", body)
+    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+    defer cancel()
+
+    req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)
+    if err != nil {
+        return nil, err
+    }
+    req.Header.Set("Content-Type", "application/json")
+
+    resp, err := c.httpClient.Do(req)
     if err != nil {
         return nil, err
     }
@@ -376,26 +402,34 @@ func (c *kesClient) post(url string, body io.Reader, limit int64) (io.Reader, er
     return &respBody, nil
 }

-func (c *kesClient) postRetry(url string, body io.ReadSeeker, limit int64) (io.Reader, error) {
+func (c *kesClient) postRetry(path string, body io.ReadSeeker, limit int64) (io.Reader, error) {
+    retryMax := 1 + len(c.endpoints)
     for i := 0; ; i++ {
         if body != nil {
             body.Seek(0, io.SeekStart) // seek to the beginning of the body.
         }
-        response, err := c.post(url, body, limit)
+
+        response, err := c.post(c.endpoints[i%len(c.endpoints)]+path, body, limit)
         if err == nil {
             return response, nil
         }

-        if !xnet.IsNetworkOrHostDown(err) && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+        // If the error is not temp. / retryable => fail the request immediately.
+        if !xnet.IsNetworkOrHostDown(err) &&
+            !errors.Is(err, io.EOF) &&
+            !errors.Is(err, io.ErrUnexpectedEOF) &&
+            !errors.Is(err, context.DeadlineExceeded) {
            return nil, err
        }

-        // retriable network errors.
-        remain := retryMax - i
-        if remain <= 0 {
+        if remain := retryMax - i; remain <= 0 { // Fail if we exceeded our retry limit.
            return response, err
        }

+        // If there are more KES instances then skip waiting and
+        // try the next endpoint directly.
+        if i < len(c.endpoints) {
+            continue
+        }
         <-time.After(LinearJitterBackoff(retryWaitMin, retryWaitMax, i))
     }
 }
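postRetry now derives the full URL per attempt from c.endpoints[i%len(c.endpoints)]+path, so each retry lands on the next KES instance, and it only backs off once every endpoint has been tried. The indexing is easy to sanity-check in isolation:

package main

import "fmt"

func main() {
	endpoints := []string{"https://kes1:7373", "https://kes2:7373", "https://kes3:7373"}
	path := "/v1/key/create/my-key"
	retryMax := 1 + len(endpoints)

	// Mirrors the attempt order in postRetry: round-robin over endpoints,
	// failing once retryMax attempts are exhausted.
	for i := 0; i < retryMax; i++ {
		fmt.Println("attempt", i, "->", endpoints[i%len(endpoints)]+path)
	}
}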
@@ -413,24 +447,17 @@ func (c *kesClient) postRetry(url string, body io.ReadSeeker, limit int64) (io.R
 // file as PEM-encoded certificate and add it to
 // the CertPool. If a file is not a PEM certificate
 // it will be ignored.
-func loadCACertificates(path string) (*x509.CertPool, error) {
-    rootCAs, _ := x509.SystemCertPool()
-    if rootCAs == nil {
-        // In some systems (like Windows) system cert pool is
-        // not supported or no certificates are present on the
-        // system - so we create a new cert pool.
-        rootCAs = x509.NewCertPool()
-    }
+func loadCACertificates(path string, rootCAs *x509.CertPool) error {
     if path == "" {
-        return rootCAs, nil
+        return nil
     }

     stat, err := os.Stat(path)
     if err != nil {
         if os.IsNotExist(err) || os.IsPermission(err) {
-            return rootCAs, nil
+            return nil
         }
-        return nil, Errorf("crypto: cannot open '%s': %v", path, err)
+        return Errorf("crypto: cannot open '%s': %v", path, err)
     }

     // If path is a file, parse as PEM-encoded certifcate
@@ -439,12 +466,12 @@ func loadCACertificates(path string) (*x509.CertPool, error) {
     if !stat.IsDir() {
         cert, err := ioutil.ReadFile(path)
         if err != nil {
-            return nil, err
+            return err
         }
         if !rootCAs.AppendCertsFromPEM(cert) {
-            return nil, Errorf("crypto: '%s' is not a valid PEM-encoded certificate", path)
+            return Errorf("crypto: '%s' is not a valid PEM-encoded certificate", path)
         }
-        return rootCAs, nil
+        return nil
     }

     // If path is a directory then try
@@ -454,7 +481,7 @@ func loadCACertificates(path string) (*x509.CertPool, error) {
     // we ignore it.
     files, err := ioutil.ReadDir(path)
     if err != nil {
-        return nil, err
+        return err
     }
     for _, file := range files {
         cert, err := ioutil.ReadFile(filepath.Join(path, file.Name()))
@@ -463,6 +490,6 @@ func loadCACertificates(path string) (*x509.CertPool, error) {
     }
         rootCAs.AppendCertsFromPEM(cert) // ignore files which are not PEM certtificates
     }
-    return rootCAs, nil
+    return nil
 }
@@ -109,9 +109,9 @@ type masterKeyKMS struct {
 // KMSInfo contains some describing information about
 // the KMS.
 type KMSInfo struct {
-    Endpoint string
-    Name     string
-    AuthType string
+    Endpoints []string
+    Name      string
+    AuthType  string
 }

 // NewMasterKey returns a basic KMS implementation from a single 256 bit master key.
@@ -147,9 +147,9 @@ func (kms *masterKeyKMS) GenerateKey(keyID string, ctx Context) (key [32]byte, s
 // KMS is configured directly using master key
 func (kms *masterKeyKMS) Info() (info KMSInfo) {
     return KMSInfo{
-        Endpoint: "",
-        Name:     "",
-        AuthType: "master-key",
+        Endpoints: []string{},
+        Name:      "",
+        AuthType:  "master-key",
     }
 }
@@ -19,6 +19,7 @@ import (
     "encoding/base64"
     "errors"

+    xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
 )

@@ -38,6 +39,8 @@ func IsMultiPart(metadata map[string]string) bool {
 func RemoveSensitiveEntries(metadata map[string]string) { // The functions is tested in TestRemoveSensitiveHeaders for compatibility reasons
     delete(metadata, SSECKey)
     delete(metadata, SSECopyKey)
+    delete(metadata, xhttp.AmzMetaUnencryptedContentLength)
+    delete(metadata, xhttp.AmzMetaUnencryptedContentMD5)
 }

 // RemoveSSEHeaders removes all crypto-specific SSE
@@ -62,6 +65,17 @@ func RemoveInternalEntries(metadata map[string]string) {
     delete(metadata, S3KMSSealedKey)
 }

+// IsSourceEncrypted returns true if the source is encrypted
+func IsSourceEncrypted(metadata map[string]string) bool {
+    if _, ok := metadata[SSECAlgorithm]; ok {
+        return true
+    }
+    if _, ok := metadata[SSEHeader]; ok {
+        return true
+    }
+    return false
+}
+
 // IsEncrypted returns true if the object metadata indicates
 // that it was uploaded using some form of server-side-encryption.
 //
@@ -115,7 +129,7 @@ func (ssec) IsEncrypted(metadata map[string]string) bool {
 // metadata is nil.
 func CreateMultipartMetadata(metadata map[string]string) map[string]string {
     if metadata == nil {
-        metadata = map[string]string{}
+        return map[string]string{SSEMultipart: ""}
     }
     metadata[SSEMultipart] = ""
     return metadata
@@ -142,7 +156,7 @@ func (s3) CreateMetadata(metadata map[string]string, keyID string, kmsKey []byte
     }

     if metadata == nil {
-        metadata = map[string]string{}
+        metadata = make(map[string]string, 5)
     }

     metadata[SSESealAlgorithm] = sealedKey.Algorithm
@@ -222,7 +236,7 @@ func (ssec) CreateMetadata(metadata map[string]string, sealedKey SealedKey) map[
     }

     if metadata == nil {
-        metadata = map[string]string{}
+        metadata = make(map[string]string, 3)
     }
     metadata[SSESealAlgorithm] = SealAlgorithm
     metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
@@ -21,9 +21,8 @@ import (

 // default retry configuration
 const (
-    retryWaitMin = 500 * time.Millisecond // minimum retry limit.
-    retryWaitMax = 3 * time.Second        // 3 secs worth of max retry.
-    retryMax     = 2
+    retryWaitMin = 100 * time.Millisecond  // minimum retry limit.
+    retryWaitMax = 1500 * time.Millisecond // 1.5 secs worth of max retry.
 )

 // LinearJitterBackoff provides the time.Duration for a caller to
@@ -199,13 +199,13 @@ func (v *vaultService) DefaultKeyID() string {
|
||||
}
|
||||
|
||||
// Info returns some information about the Vault,
|
||||
// configuration - like the endpoint or authentication
|
||||
// configuration - like the endpoints or authentication
|
||||
// method.
|
||||
func (v *vaultService) Info() KMSInfo {
|
||||
return KMSInfo{
|
||||
Endpoint: v.config.Endpoint,
|
||||
Name: v.DefaultKeyID(),
|
||||
AuthType: v.config.Auth.Type,
|
||||
Endpoints: []string{v.config.Endpoint},
|
||||
Name: v.DefaultKeyID(),
|
||||
AuthType: v.config.Auth.Type,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/crawler"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
@@ -35,6 +36,7 @@ import (
|
||||
"github.com/minio/minio/pkg/env"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/willf/bloom"
|
||||
)
|
||||
|
||||
@@ -44,6 +46,18 @@ const (
|
||||
dataCrawlStartDelay = 5 * time.Minute // Time to wait on startup and between cycles.
|
||||
dataUsageUpdateDirCycles = 16 // Visit all folders every n cycles.
|
||||
|
||||
healDeleteDangling = true
|
||||
healFolderIncludeProb = 32 // Include a clean folder one in n cycles.
|
||||
healObjectSelectProb = 512 // Overall probability of a file being scanned; one in n.
|
||||
|
||||
// sleep for an hour after a lock timeout
|
||||
// before retrying to acquire lock again.
|
||||
dataCrawlerLeaderLockTimeoutSleepInterval = time.Hour
|
||||
)
|
||||
|
||||
var (
|
||||
globalCrawlerConfig crawler.Config
|
||||
dataCrawlerLeaderLockTimeout = newDynamicTimeout(1*time.Minute, 30*time.Second)
|
||||
)
|
||||
|
||||
// initDataCrawler will start the crawler unless disabled.
|
||||
@@ -57,6 +71,18 @@ func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
// The function will block until the context is canceled.
|
||||
// There should only ever be one crawler running per cluster.
|
||||
func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
// Make sure only 1 crawler is running on the cluster.
|
||||
locker := objAPI.NewNSLock(ctx, minioMetaBucket, "runDataCrawler.lock")
|
||||
for {
|
||||
err := locker.GetLock(dataCrawlerLeaderLockTimeout)
|
||||
if err != nil {
|
||||
time.Sleep(dataCrawlerLeaderLockTimeoutSleepInterval)
|
||||
continue
|
||||
}
|
||||
break
|
||||
// No unlock for "leader" lock.
|
||||
}
|
||||
|
||||
// Load current bloom cycle
|
||||
nextBloomCycle := intDataUpdateTracker.current() + 1
|
||||
var buf bytes.Buffer
|
||||
@@ -87,12 +113,6 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
if err == nil {
|
||||
// Store new cycle...
|
||||
nextBloomCycle++
|
||||
if nextBloomCycle%dataUpdateTrackerResetEvery == 0 {
|
||||
if intDataUpdateTracker.debug {
|
||||
logger.Info(color.Green("runDataCrawler:") + " Resetting bloom filter for next runs.")
|
||||
}
|
||||
nextBloomCycle++
|
||||
}
|
||||
var tmp [8]byte
|
||||
binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
|
||||
r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)), false)
|
||||
@@ -111,8 +131,9 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
}
|
||||
|
||||
type cachedFolder struct {
|
||||
name string
|
||||
parent *dataUsageHash
|
||||
name string
|
||||
parent *dataUsageHash
|
||||
objectHealProbDiv uint32
|
||||
}
|
||||
|
||||
type folderScanner struct {
|
||||
@@ -125,6 +146,8 @@ type folderScanner struct {
|
||||
|
||||
dataUsageCrawlMult float64
|
||||
dataUsageCrawlDebug bool
|
||||
healFolderInclude uint32 // Include a clean folder one in n cycles.
|
||||
healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
|
||||
|
||||
newFolders []cachedFolder
|
||||
existingFolders []cachedFolder
|
||||
@@ -167,8 +190,17 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
existingFolders: nil,
|
||||
dataUsageCrawlMult: delayMult,
|
||||
dataUsageCrawlDebug: intDataUpdateTracker.debug,
|
||||
healFolderInclude: 0,
|
||||
healObjectSelect: 0,
|
||||
}
|
||||
|
||||
// Enable healing in XL mode.
|
||||
if globalIsErasure {
|
||||
// Include a clean folder one in n cycles.
|
||||
s.healFolderInclude = healFolderIncludeProb
|
||||
// Do a heal check on an object once every n cycles. Must divide into healFolderInclude
|
||||
s.healObjectSelect = healObjectSelectProb
|
||||
}
|
||||
if len(cache.Info.BloomFilter) > 0 {
|
||||
s.withFilter = &bloomFilter{BloomFilter: &bloom.BloomFilter{}}
|
||||
_, err := s.withFilter.ReadFrom(bytes.NewBuffer(cache.Info.BloomFilter))
|
||||
@@ -189,7 +221,7 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
}
|
||||
|
||||
// Always scan flattenLevels deep. Cache root is level 0.
|
||||
todo := []cachedFolder{{name: cache.Info.Name}}
|
||||
todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}}
|
||||
for i := 0; i < flattenLevels; i++ {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Level %v, scanning %v directories."+logSuffix, i, len(todo))
|
||||
@@ -218,7 +250,7 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
return s.newCache, ctx.Err()
|
||||
default:
|
||||
}
|
||||
du, err := s.deepScanFolder(ctx, folder.name)
|
||||
du, err := s.deepScanFolder(ctx, folder)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
@@ -249,26 +281,38 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
}
|
||||
h := hashPath(folder.name)
|
||||
if !h.mod(s.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) {
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
} else {
|
||||
folder.objectHealProbDiv = s.healFolderInclude
|
||||
}
|
||||
folder.objectHealProbDiv = dataUsageUpdateDirCycles
|
||||
}
|
||||
|
||||
if s.withFilter != nil {
|
||||
_, prefix := path2BucketObjectWithBasePath(basePath, folder.name)
|
||||
if s.oldCache.Info.lifeCycle == nil || !s.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
|
||||
// If folder isn't in filter, skip it completely.
|
||||
if !s.withFilter.containsDir(folder.name) {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Skipping non-updated folder: %v"+logSuffix, folder)
|
||||
if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Skipping non-updated folder: %v"+logSuffix, folder)
|
||||
}
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
} else {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Adding non-updated folder to heal check: %v"+logSuffix, folder.name)
|
||||
}
|
||||
// Update probability of including objects
|
||||
folder.objectHealProbDiv = s.healFolderInclude
|
||||
}
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update on this cycle...
|
||||
du, err := s.deepScanFolder(ctx, folder.name)
|
||||
du, err := s.deepScanFolder(ctx, folder)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
@@ -301,15 +345,16 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
default:
|
||||
}
|
||||
thisHash := hashPath(folder.name)
|
||||
existing := f.oldCache.findChildrenCopy(thisHash)
|
||||
|
||||
// If there are lifecycle rules for the prefix, remove the filter.
|
||||
filter := f.withFilter
|
||||
var activeLifeCycle *lifecycle.Lifecycle
|
||||
if f.oldCache.Info.lifeCycle != nil && filter != nil {
|
||||
if f.oldCache.Info.lifeCycle != nil {
|
||||
_, prefix := path2BucketObjectWithBasePath(f.root, folder.name)
|
||||
if f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" Prefix %q has active rules", prefix)
|
||||
logger.Info(color.Green("folder-scanner:")+" Prefix %q has active rules", prefix)
|
||||
}
|
||||
activeLifeCycle = f.oldCache.Info.lifeCycle
|
||||
filter = nil
|
||||
@@ -318,11 +363,19 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
if _, ok := f.oldCache.Cache[thisHash.Key()]; filter != nil && ok {
|
||||
// If folder isn't in filter and we have data, skip it completely.
|
||||
if folder.name != dataUsageRoot && !filter.containsDir(folder.name) {
|
||||
f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" Skipping non-updated folder: %v", folder.name)
|
||||
if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
|
||||
f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("folder-scanner:")+" Skipping non-updated folder: %v", folder.name)
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("folder-scanner:")+" Adding non-updated folder to heal check: %v", folder.name)
|
||||
}
|
||||
// If probability was already crawlerHealFolderInclude, keep it.
|
||||
folder.objectHealProbDiv = f.healFolderInclude
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
f.waitForLowActiveIO()
|
||||
@@ -336,14 +389,14 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
|
||||
if bucket == "" {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" no bucket (%s,%s)", f.root, entName)
|
||||
logger.Info(color.Green("folder-scanner:")+" no bucket (%s,%s)", f.root, entName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if isReservedOrInvalidBucket(bucket, false) {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" invalid bucket: %v, entry: %v", bucket, entName)
|
||||
logger.Info(color.Green("folder-scanner:")+" invalid bucket: %v, entry: %v", bucket, entName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -359,7 +412,8 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
_, exists := f.oldCache.Cache[h.Key()]
|
||||
cache.addChildString(entName)
|
||||
|
||||
this := cachedFolder{name: entName, parent: &thisHash}
|
||||
this := cachedFolder{name: entName, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
|
||||
delete(existing, h.Key())
|
||||
cache.addChild(h)
|
||||
if final {
|
||||
if exists {
|
||||
@@ -385,11 +439,12 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
objectName: path.Base(entName),
|
||||
debug: f.dataUsageCrawlDebug,
|
||||
lifeCycle: activeLifeCycle,
|
||||
heal: thisHash.mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv),
|
||||
}
|
||||
size, err := f.getSize(item)
|
||||
|
||||
sleepDuration(time.Since(t), f.dataUsageCrawlMult)
|
||||
if err == errSkipFile || err == errFileNotFound {
|
||||
if err == errSkipFile {
|
||||
return nil
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
@@ -402,19 +457,78 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if f.healObjectSelect == 0 {
|
||||
// If we are not scanning, return now.
|
||||
f.newCache.replaceHashed(thisHash, folder.parent, cache)
|
||||
continue
|
||||
}
|
||||
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objAPI == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
bgSeq, found := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
// Whatever remains in 'existing' are folders at this level
|
||||
// that existed in the previous run but wasn't found now.
|
||||
//
|
||||
// This may be because of 2 reasons:
|
||||
//
|
||||
// 1) The folder/object was deleted.
|
||||
// 2) We come from another disk and this disk missed the write.
|
||||
//
|
||||
// We therefore perform a heal check.
|
||||
// If that doesn't bring it back we remove the folder and assume it was deleted.
|
||||
// This means that the next run will not look for it.
|
||||
for k := range existing {
|
||||
f.waitForLowActiveIO()
|
||||
bucket, prefix := path2BucketObject(k)
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("folder-scanner:")+" checking disappeared folder: %v/%v", bucket, prefix)
|
||||
}
|
||||
|
||||
err = objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{Recursive: true, Remove: healDeleteDangling},
|
||||
func(bucket, object, versionID string) error {
|
||||
return bgSeq.queueHealTask(healSource{
|
||||
bucket: bucket,
|
||||
object: object,
|
||||
versionID: versionID,
|
||||
}, madmin.HealItemObject)
|
||||
})
|
||||
|
||||
if f.dataUsageCrawlDebug && err != nil {
|
||||
logger.Info(color.Green("healObjects:")+" checking returned value %v", err)
|
||||
}
|
||||
|
||||
// Add unless healing returned an error.
|
||||
if err == nil {
|
||||
this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
|
||||
cache.addChild(hashPath(k))
|
||||
if final {
|
||||
f.existingFolders = append(f.existingFolders, this)
|
||||
} else {
|
||||
nextFolders = append(nextFolders, this)
|
||||
}
|
||||
}
|
||||
}
|
||||
f.newCache.replaceHashed(thisHash, folder.parent, cache)
|
||||
}
|
||||
return nextFolders, nil
|
||||
}
|
||||
|
||||
// deepScanFolder will deep scan a folder and return the size if no error occurs.
|
||||
func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dataUsageEntry, error) {
|
||||
func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder) (*dataUsageEntry, error) {
|
||||
var cache dataUsageEntry
|
||||
|
||||
done := ctx.Done()
|
||||
|
||||
var addDir func(entName string, typ os.FileMode) error
|
||||
var dirStack = []string{f.root, folder}
|
||||
var dirStack = []string{f.root, folder.name}
|
||||
|
||||
addDir = func(entName string, typ os.FileMode) error {
|
||||
select {
|
||||
@@ -445,7 +559,7 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dat
|
||||
if f.oldCache.Info.lifeCycle != nil {
|
||||
if f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" Prefix %q has active rules", prefix)
|
||||
logger.Info(color.Green("folder-scanner:")+" Prefix %q has active rules", prefix)
|
||||
}
|
||||
activeLifeCycle = f.oldCache.Info.lifeCycle
|
||||
}
|
||||
@@ -460,6 +574,7 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dat
|
||||
objectName: path.Base(entName),
|
||||
debug: f.dataUsageCrawlDebug,
|
||||
lifeCycle: activeLifeCycle,
|
||||
heal: hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv),
|
||||
})
|
||||
|
||||
// Don't sleep for really small amount of time
|
||||
@@ -490,6 +605,7 @@ type crawlItem struct {
|
||||
prefix string // Only the prefix if any, does not have final object name.
|
||||
objectName string // Only the object name without prefixes.
|
||||
lifeCycle *lifecycle.Lifecycle
|
||||
heal bool // Has the object been selected for heal check?
|
||||
debug bool
|
||||
}
|
||||
|
||||
@@ -509,9 +625,9 @@ func (i *crawlItem) transformMetaDir() {
|
||||
|
||||
// actionMeta contains information used to apply actions.
|
||||
type actionMeta struct {
|
||||
oi ObjectInfo
|
||||
trustOI bool // Set true if oi can be trusted and has been read with quorum.
|
||||
numVersions int // The number of versions of this object
|
||||
oi ObjectInfo
|
||||
successorModTime time.Time // The modtime of the successor version
|
||||
numVersions int // The number of versions of this object
|
||||
}
|
||||
|
||||
// applyActions will apply lifecycle checks on to a scanned item.
|
||||
@@ -523,6 +639,20 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
|
||||
if i.debug {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
if i.heal {
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" heal checking: %v/%v v%s", i.bucket, i.objectPath(), meta.oi.VersionID)
|
||||
}
|
||||
res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, madmin.HealOpts{Remove: healDeleteDangling})
|
||||
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
||||
return 0
|
||||
}
|
||||
if err != nil && !errors.Is(err, NotImplemented{}) {
|
||||
logger.LogIf(ctx, err)
|
||||
return 0
|
||||
}
|
||||
size = res.ObjectSize
|
||||
}
|
||||
if i.lifeCycle == nil {
|
||||
return size
|
||||
}
|
||||
@@ -530,16 +660,17 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
|
||||
versionID := meta.oi.VersionID
|
||||
action := i.lifeCycle.ComputeAction(
|
||||
lifecycle.ObjectOpts{
|
||||
Name: i.objectPath(),
|
||||
UserTags: meta.oi.UserTags,
|
||||
ModTime: meta.oi.ModTime,
|
||||
VersionID: meta.oi.VersionID,
|
||||
DeleteMarker: meta.oi.DeleteMarker,
|
||||
IsLatest: meta.oi.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
Name: i.objectPath(),
|
||||
UserTags: meta.oi.UserTags,
|
||||
ModTime: meta.oi.ModTime,
|
||||
VersionID: meta.oi.VersionID,
|
||||
DeleteMarker: meta.oi.DeleteMarker,
|
||||
IsLatest: meta.oi.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
SuccessorModTime: meta.successorModTime,
|
||||
})
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: %q, Initial scan: %v", i.objectPath(), action)
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: %q (version-id=%s), Initial scan: %v", i.objectPath(), versionID, action)
|
||||
}
|
||||
switch action {
|
||||
case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
|
||||
@@ -548,63 +679,72 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
|
||||
return size
|
||||
}
|
||||
|
||||
// These (expensive) operations should only run on items we are likely to delete.
|
||||
// Load to ensure that we have the correct version and not an unsynced version.
|
||||
if !meta.trustOI {
|
||||
obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
|
||||
VersionID: versionID,
|
||||
})
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case MethodNotAllowed: // This happens usually for a delete marker
|
||||
if !obj.DeleteMarker { // if this is not a delete marker log and return
|
||||
// Do nothing - heal in the future.
|
||||
logger.LogIf(ctx, err)
|
||||
return size
|
||||
}
|
||||
case ObjectNotFound:
|
||||
// object not found return 0
|
||||
return 0
|
||||
default:
|
||||
// All other errors proceed.
|
||||
obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
|
||||
VersionID: versionID,
|
||||
})
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case MethodNotAllowed: // This happens usually for a delete marker
|
||||
if !obj.DeleteMarker { // if this is not a delete marker log and return
|
||||
// Do nothing - heal in the future.
|
||||
logger.LogIf(ctx, err)
|
||||
return size
|
||||
}
|
||||
}
|
||||
size = obj.Size
|
||||
|
||||
// Recalculate action.
|
||||
action = i.lifeCycle.ComputeAction(
|
||||
lifecycle.ObjectOpts{
|
||||
Name: i.objectPath(),
|
||||
UserTags: obj.UserTags,
|
||||
ModTime: obj.ModTime,
|
||||
VersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
IsLatest: obj.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
})
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action)
|
||||
}
|
||||
versionID = obj.VersionID
|
||||
switch action {
|
||||
case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
|
||||
case ObjectNotFound:
|
||||
// object not found return 0
|
||||
return 0
|
||||
default:
|
||||
// No action.
|
||||
// All other errors proceed.
|
||||
logger.LogIf(ctx, err)
|
||||
return size
|
||||
}
|
||||
}
|
||||
size = obj.Size
|
||||
|
||||
// Recalculate action.
|
||||
action = i.lifeCycle.ComputeAction(
|
||||
lifecycle.ObjectOpts{
|
||||
Name: i.objectPath(),
|
||||
UserTags: obj.UserTags,
|
||||
ModTime: obj.ModTime,
|
||||
VersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
IsLatest: obj.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
SuccessorModTime: meta.successorModTime,
|
||||
})
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action)
|
||||
}
|
||||
switch action {
|
||||
case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
|
||||
default:
|
||||
// No action.
|
||||
return size
|
||||
}
|
||||
|
||||
opts := ObjectOptions{}
|
||||
switch action {
|
||||
case lifecycle.DeleteVersionAction:
|
||||
opts.VersionID = versionID
|
||||
// Defensive code, should never happen
|
||||
if obj.VersionID == "" {
|
||||
return size
|
||||
}
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(i.bucket); rcfg.LockEnabled {
|
||||
locked := enforceRetentionForDeletion(ctx, obj)
|
||||
if locked {
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: %s is locked, not deleting", i.objectPath())
|
||||
}
|
||||
return size
|
||||
}
|
||||
}
|
||||
opts.VersionID = obj.VersionID
|
||||
case lifecycle.DeleteAction:
|
||||
opts.Versioned = globalBucketVersioningSys.Enabled(i.bucket)
|
||||
}
|
||||
|
||||
obj, err := o.DeleteObject(ctx, i.bucket, i.objectPath(), opts)
|
||||
obj, err = o.DeleteObject(ctx, i.bucket, i.objectPath(), opts)
|
||||
if err != nil {
|
||||
// Assume it is still there.
|
||||
logger.LogIf(ctx, err)
|
||||
@@ -642,9 +782,6 @@ func sleepDuration(d time.Duration, x float64) {
|
||||
func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, meta actionMeta) {
|
||||
if meta.oi.ReplicationStatus == replication.Pending ||
|
||||
meta.oi.ReplicationStatus == replication.Failed {
|
||||
// if heal encounters a pending replication status, either replication
|
||||
// has failed due to server shutdown or crawler and PutObject replication are in contention.
|
||||
healPending := meta.oi.ReplicationStatus == replication.Pending
|
||||
replicateObject(ctx, meta.oi.Bucket, meta.oi.Name, meta.oi.VersionID, o, nil, healPending)
|
||||
globalReplicationState.queueReplicaTask(meta.oi)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,9 +48,6 @@ const (
|
||||
dataUpdateTrackerFilename = dataUsageBucket + SlashSeparator + ".tracker.bin"
|
||||
dataUpdateTrackerVersion = 2
|
||||
dataUpdateTrackerSaveInterval = 5 * time.Minute
|
||||
|
||||
// Reset bloom filters every n cycle
|
||||
dataUpdateTrackerResetEvery = 1000
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -181,6 +178,8 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
|
||||
}
|
||||
d.load(ctx, drives...)
|
||||
go d.startCollector(ctx)
|
||||
// startSaver will unlock.
|
||||
d.mu.Lock()
|
||||
go d.startSaver(ctx, dataUpdateTrackerSaveInterval, drives)
|
||||
}
|
||||
|
||||
@@ -214,17 +213,17 @@ func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
|
||||
}
|
||||
|
||||
// startSaver will start a saver that will write d to all supplied drives at specific intervals.
|
||||
// 'd' must be write locked when started and will be unlocked.
|
||||
// The saver will save and exit when supplied context is closed.
|
||||
func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Duration, drives []string) {
|
||||
t := time.NewTicker(interval)
|
||||
defer t.Stop()
|
||||
var buf bytes.Buffer
|
||||
d.mu.Lock()
|
||||
saveNow := d.save
|
||||
exited := make(chan struct{})
|
||||
d.saveExited = exited
|
||||
d.mu.Unlock()
|
||||
t := time.NewTicker(interval)
|
||||
defer t.Stop()
|
||||
defer close(exited)
|
||||
var buf bytes.Buffer
|
||||
for {
|
||||
var exit bool
|
||||
select {
|
||||
@@ -427,6 +426,8 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
|
||||
}
|
||||
// Ignore what remains on the stream.
|
||||
// Update d:
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.Current = dst.Current
|
||||
d.History = dst.History
|
||||
d.Saved = dst.Saved
|
||||
|
||||
@@ -42,6 +42,10 @@ type testingLogger struct {
|
||||
t testLoggerI
|
||||
}
|
||||
|
||||
func (t *testingLogger) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *testingLogger) Send(entry interface{}, errKind string) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
@@ -85,7 +85,12 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) {
|
||||
}
|
||||
|
||||
// mod returns true if the hash mod cycles == cycle.
|
||||
// If cycles is 0 false is always returned.
|
||||
// If cycles is 1 true is always returned (as expected).
|
||||
func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
|
||||
if cycles <= 1 {
|
||||
return cycles == 1
|
||||
}
|
||||
return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
|
||||
}
|
||||
|
||||
@@ -117,6 +122,16 @@ func (d *dataUsageCache) find(path string) *dataUsageEntry {
|
||||
return &due
|
||||
}
|
||||
|
||||
// findChildrenCopy returns a copy of the children of the supplied hash.
|
||||
func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap {
|
||||
ch := d.Cache[h.String()].Children
|
||||
res := make(dataUsageHashMap, len(ch))
|
||||
for k := range ch {
|
||||
res[k] = struct{}{}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns nil if not found.
|
||||
func (d *dataUsageCache) subCache(path string) dataUsageCache {
|
||||
dst := dataUsageCache{Info: dataUsageCacheInfo{
|
||||
@@ -328,7 +343,7 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
|
||||
flat := d.flatten(*e)
|
||||
dst[bucket.Name] = BucketUsageInfo{
|
||||
Size: uint64(flat.Size),
|
||||
ObjectsCount: uint64(flat.Objects),
|
||||
ObjectsCount: flat.Objects,
|
||||
ObjectSizesHistogram: flat.ObjSizes.toMap(),
|
||||
}
|
||||
}
|
||||
@@ -345,7 +360,7 @@ func (d *dataUsageCache) bucketUsageInfo(bucket string) BucketUsageInfo {
|
||||
flat := d.flatten(*e)
|
||||
return BucketUsageInfo{
|
||||
Size: uint64(flat.Size),
|
||||
ObjectsCount: uint64(flat.Objects),
|
||||
ObjectsCount: flat.Objects,
|
||||
ObjectSizesHistogram: flat.ObjSizes.toMap(),
|
||||
}
|
||||
}
|
||||
@@ -413,10 +428,15 @@ func (d *dataUsageCache) merge(other dataUsageCache) {
|
||||
}
|
||||
}
|
||||
|
||||
type objectIO interface {
|
||||
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
|
||||
PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
}
|
||||
|
||||
// load the cache content with name from minioMetaBackgroundOpsBucket.
|
||||
// Only backend errors are returned as errors.
|
||||
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
|
||||
func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name string) error {
|
||||
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
|
||||
var buf bytes.Buffer
|
||||
err := store.GetObject(ctx, dataUsageBucket, name, 0, -1, &buf, "", ObjectOptions{})
|
||||
if err != nil {
|
||||
@@ -435,7 +455,7 @@ func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name strin
|
||||
}
|
||||
|
||||
// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
|
||||
func (d *dataUsageCache) save(ctx context.Context, store ObjectLayer, name string) error {
|
||||
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
|
||||
b := d.serialize()
|
||||
size := int64(len(b))
|
||||
r, err := hash.NewReader(bytes.NewReader(b), size, "", "", size, false)
|
||||
|
||||
@@ -240,7 +240,6 @@ func TestDataUsageUpdate(t *testing.T) {
|
||||
t.Fatal("got nil result")
|
||||
}
|
||||
if w.flatten {
|
||||
t.Log(e.Children)
|
||||
*e = got.flatten(*e)
|
||||
}
|
||||
if e.Size != int64(w.size) {
|
||||
@@ -360,6 +359,13 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got.root() == nil {
|
||||
t.Log("cached folders:")
|
||||
for folder := range got.Cache {
|
||||
t.Log("folder:", folder)
|
||||
}
|
||||
t.Fatal("got nil root.")
|
||||
}
|
||||
|
||||
// Test dirs
|
||||
var want = []struct {
|
||||
|
||||
@@ -46,7 +46,7 @@ const (
|
||||
cacheMetaJSONFile = "cache.json"
|
||||
cacheDataFile = "part.1"
|
||||
cacheMetaVersion = "1.0.0"
|
||||
cacheExpiryDays = time.Duration(90 * time.Hour * 24) // defaults to 90 days
|
||||
cacheExpiryDays = 90 * time.Hour * 24 // defaults to 90 days
|
||||
// SSECacheEncrypted is the metadata key indicating that the object
|
||||
// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
|
||||
SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"
|
||||
@@ -157,7 +157,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
|
||||
}
|
||||
cache := diskCache{
|
||||
dir: dir,
|
||||
triggerGC: make(chan struct{}),
|
||||
triggerGC: make(chan struct{}, 1),
|
||||
stats: CacheDiskStats{Dir: dir},
|
||||
quotaPct: quotaPct,
|
||||
after: config.After,
|
||||
@@ -174,7 +174,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
|
||||
nsMutex: newNSLock(false),
|
||||
}
|
||||
go cache.purgeWait(ctx)
|
||||
cache.diskUsageHigh() // update if cache usage is already high.
|
||||
cache.diskSpaceAvailable(0) // update if cache usage is already high.
|
||||
cache.NewNSLockFn = func(ctx context.Context, cachePath string) RWLocker {
|
||||
return cache.nsMutex.NewNSLock(ctx, nil, cachePath, "")
|
||||
}
|
||||
@@ -203,9 +203,9 @@ func (c *diskCache) diskUsageLow() bool {
|
||||
return low
|
||||
}
|
||||
|
||||
// Returns if the disk usage reaches high water mark w.r.t the configured cache quota.
|
||||
// gc starts if high water mark reached.
|
||||
func (c *diskCache) diskUsageHigh() bool {
|
||||
// Returns if the disk usage reaches or exceeds configured cache quota when size is added.
|
||||
// If current usage without size exceeds high watermark a GC is automatically queued.
|
||||
func (c *diskCache) diskSpaceAvailable(size int64) bool {
|
||||
gcTriggerPct := c.quotaPct * c.highWatermark / 100
|
||||
di, err := disk.GetInfo(c.dir)
|
||||
if err != nil {
|
||||
@@ -214,27 +214,30 @@ func (c *diskCache) diskUsageHigh() bool {
|
||||
logger.LogIf(ctx, err)
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - di.Free) * 100 / di.Total
|
||||
high := int(usedPercent) >= gcTriggerPct
|
||||
atomic.StoreUint64(&c.stats.UsagePercent, usedPercent)
|
||||
if high {
|
||||
atomic.StoreInt32(&c.stats.UsageState, 1)
|
||||
}
|
||||
return high
|
||||
}
|
||||
|
||||
// Returns if size space can be allocated without exceeding
|
||||
// max disk usable for caching
|
||||
func (c *diskCache) diskAvailable(size int64) bool {
|
||||
di, err := disk.GetInfo(c.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
|
||||
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
if di.Total == 0 {
|
||||
logger.Info("diskCache: Received 0 total disk size")
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
|
||||
return int(usedPercent) < c.quotaPct
|
||||
usedPercent := float64(di.Total-di.Free) * 100 / float64(di.Total)
|
||||
if usedPercent >= float64(gcTriggerPct) {
|
||||
atomic.StoreInt32(&c.stats.UsageState, 1)
|
||||
c.queueGC()
|
||||
}
|
||||
atomic.StoreUint64(&c.stats.UsagePercent, uint64(usedPercent))
|
||||
|
||||
// Recalculate percentage with provided size added.
|
||||
usedPercent = float64(di.Total-di.Free+uint64(size)) * 100 / float64(di.Total)
|
||||
|
||||
return usedPercent < float64(c.quotaPct)
|
||||
}
|
||||
|
||||
// queueGC will queue a GC.
|
||||
// Calling this function is always non-blocking.
|
||||
func (c *diskCache) queueGC() {
|
||||
select {
|
||||
case c.triggerGC <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// toClear returns how many bytes should be cleared to reach the low watermark quota.
|
||||
@@ -247,7 +250,7 @@ func (c *diskCache) toClear() uint64 {
|
||||
logger.LogIf(ctx, err)
|
||||
return 0
|
||||
}
|
||||
return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark))
|
||||
return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark))
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -417,7 +420,7 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
|
||||
func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {
|
||||
|
||||
cLock := c.NewNSLockFn(ctx, cacheObjPath)
|
||||
if err = cLock.GetRLock(globalObjectTimeout); err != nil {
|
||||
if err = cLock.GetRLock(globalOperationTimeout); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -499,7 +502,7 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
|
||||
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
|
||||
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
||||
cLock := c.NewNSLockFn(ctx, cachedPath)
|
||||
if err := cLock.GetLock(globalObjectTimeout); err != nil {
|
||||
if err := cLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer cLock.Unlock()
|
||||
@@ -658,14 +661,13 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)
|
||||
|
||||
// Caches the object to disk
|
||||
func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly bool) error {
|
||||
if c.diskUsageHigh() {
|
||||
c.triggerGC <- struct{}{}
|
||||
if !c.diskSpaceAvailable(size) {
|
||||
io.Copy(ioutil.Discard, data)
|
||||
return errDiskFull
|
||||
}
|
||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||
cLock := c.NewNSLockFn(ctx, cachePath)
|
||||
if err := cLock.GetLock(globalObjectTimeout); err != nil {
|
||||
if err := cLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer cLock.Unlock()
|
||||
@@ -688,16 +690,13 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
|
||||
if rs != nil {
|
||||
return c.putRange(ctx, bucket, object, data, size, rs, opts)
|
||||
}
|
||||
if !c.diskAvailable(size) {
|
||||
if !c.diskSpaceAvailable(size) {
|
||||
return errDiskFull
|
||||
}
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
var metadata = make(map[string]string)
|
||||
for k, v := range opts.UserDefined {
|
||||
metadata[k] = v
|
||||
}
|
||||
var metadata = cloneMSS(opts.UserDefined)
|
||||
var reader = data
|
||||
var actualSize = uint64(size)
|
||||
if globalCacheKMS != nil {
|
||||
@@ -719,7 +718,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
|
||||
|
||||
if actualSize != uint64(n) {
|
||||
removeAll(cachePath)
|
||||
return IncompleteBody{}
|
||||
return IncompleteBody{Bucket: bucket, Object: object}
|
||||
}
|
||||
return c.saveMetadata(ctx, bucket, object, metadata, n, nil, "", incHitsOnly)
|
||||
}
|
||||
@@ -730,17 +729,14 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !c.diskAvailable(rlen) {
|
||||
if !c.diskSpaceAvailable(rlen) {
|
||||
return errDiskFull
|
||||
}
|
||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
var metadata = make(map[string]string)
|
||||
for k, v := range opts.UserDefined {
|
||||
metadata[k] = v
|
||||
}
|
||||
var metadata = cloneMSS(opts.UserDefined)
|
||||
var reader = data
|
||||
var actualSize = uint64(rlen)
|
||||
// objSize is the actual size of object (with encryption overhead if any)
|
||||
@@ -766,7 +762,7 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
|
||||
}
|
||||
if actualSize != uint64(n) {
|
||||
removeAll(cachePath)
|
||||
return IncompleteBody{}
|
||||
return IncompleteBody{Bucket: bucket, Object: object}
|
||||
}
|
||||
return c.saveMetadata(ctx, bucket, object, metadata, int64(objSize), rs, cacheFile, false)
|
||||
}
|
||||
@@ -871,7 +867,7 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
|
||||
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
|
||||
cacheObjPath := getCacheSHADir(c.dir, bucket, object)
|
||||
cLock := c.NewNSLockFn(ctx, cacheObjPath)
|
||||
if err := cLock.GetRLock(globalObjectTimeout); err != nil {
|
||||
if err := cLock.GetRLock(globalOperationTimeout); err != nil {
|
||||
return nil, numHits, err
|
||||
}
|
||||
|
||||
@@ -935,7 +931,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
|
||||
// Deletes the cached object
|
||||
func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
|
||||
cLock := c.NewNSLockFn(ctx, cacheObjPath)
|
||||
if err := cLock.GetLock(globalObjectTimeout); err != nil {
|
||||
if err := cLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer cLock.Unlock()
|
||||
|
||||
@@ -102,7 +102,7 @@ func (c *cacheControl) isStale(modTime time.Time) bool {
|
||||
func cacheControlOpts(o ObjectInfo) *cacheControl {
|
||||
c := cacheControl{}
|
||||
m := o.UserDefined
|
||||
if o.Expires != timeSentinel {
|
||||
if !o.Expires.Equal(timeSentinel) {
|
||||
c.expiry = o.Expires
|
||||
}
|
||||
|
||||
@@ -489,9 +489,15 @@ func (f *fileScorer) queueString() string {
|
||||
// bytesToClear() returns the number of bytes to clear to reach low watermark
|
||||
// w.r.t quota given disk total and free space, quota in % allocated to cache
|
||||
// and low watermark % w.r.t allowed quota.
|
||||
func bytesToClear(total, free int64, quotaPct, lowWatermark uint64) uint64 {
|
||||
used := (total - free)
|
||||
// If the high watermark hasn't been reached 0 will be returned.
|
||||
func bytesToClear(total, free int64, quotaPct, lowWatermark, highWatermark uint64) uint64 {
|
||||
used := total - free
|
||||
quotaAllowed := total * (int64)(quotaPct) / 100
|
||||
lowWMUsage := (total * (int64)(lowWatermark*quotaPct) / (100 * 100))
|
||||
highWMUsage := total * (int64)(highWatermark*quotaPct) / (100 * 100)
|
||||
if used < highWMUsage {
|
||||
return 0
|
||||
}
|
||||
// Return bytes needed to reach low watermark.
|
||||
lowWMUsage := total * (int64)(lowWatermark*quotaPct) / (100 * 100)
|
||||
return (uint64)(math.Min(float64(quotaAllowed), math.Max(0.0, float64(used-lowWMUsage))))
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestGetCacheControlOpts(t *testing.T) {
|
||||
t.Run("", func(t *testing.T) {
|
||||
m := make(map[string]string)
|
||||
m["cache-control"] = testCase.cacheControlHeaderVal
|
||||
if testCase.expiryHeaderVal != timeSentinel {
|
||||
if !testCase.expiryHeaderVal.Equal(timeSentinel) {
|
||||
m["expires"] = testCase.expiryHeaderVal.String()
|
||||
}
|
||||
c := cacheControlOpts(ObjectInfo{UserDefined: m, Expires: testCase.expiryHeaderVal})
|
||||
@@ -149,22 +149,26 @@ func TestNewFileScorer(t *testing.T) {
|
||||
}
|
||||
func TestBytesToClear(t *testing.T) {
|
||||
testCases := []struct {
|
||||
total int64
|
||||
free int64
|
||||
quotaPct uint64
|
||||
watermarkLow uint64
|
||||
expected uint64
|
||||
total int64
|
||||
free int64
|
||||
quotaPct uint64
|
||||
watermarkLow uint64
|
||||
watermarkHigh uint64
|
||||
expected uint64
|
||||
}{
|
||||
{1000, 800, 40, 90, 0},
|
||||
{1000, 200, 40, 90, 400},
|
||||
{1000, 400, 40, 90, 240},
|
||||
{1000, 600, 40, 90, 40},
|
||||
{1000, 600, 40, 70, 120},
|
||||
{1000, 1000, 90, 70, 0},
|
||||
{1000, 0, 90, 70, 370},
|
||||
{total: 1000, free: 800, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 0},
|
||||
{total: 1000, free: 200, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 400},
|
||||
{total: 1000, free: 400, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 240},
|
||||
{total: 1000, free: 600, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 40},
|
||||
{total: 1000, free: 600, quotaPct: 40, watermarkLow: 70, watermarkHigh: 70, expected: 120},
|
||||
{total: 1000, free: 1000, quotaPct: 90, watermarkLow: 70, watermarkHigh: 70, expected: 0},
|
||||
|
||||
// High not yet reached..
|
||||
{total: 1000, free: 250, quotaPct: 100, watermarkLow: 50, watermarkHigh: 90, expected: 0},
|
||||
{total: 1000, free: 250, quotaPct: 100, watermarkLow: 50, watermarkHigh: 90, expected: 0},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
toClear := bytesToClear(tc.total, tc.free, tc.quotaPct, tc.watermarkLow)
|
||||
toClear := bytesToClear(tc.total, tc.free, tc.quotaPct, tc.watermarkLow, tc.watermarkHigh)
|
||||
if tc.expected != toClear {
|
||||
t.Errorf("test %d expected %v, got %v", i, tc.expected, toClear)
|
||||
}
|
||||
|
||||
@@ -85,16 +85,15 @@ type cacheObjects struct {
|
||||
}
|
||||
|
||||
func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {
|
||||
metadata := make(map[string]string)
|
||||
metadata["etag"] = eTag
|
||||
metadata := map[string]string{"etag": eTag}
|
||||
return dcache.SaveMetadata(ctx, bucket, object, metadata, size, rs, "", true)
|
||||
}
|
||||
|
||||
// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
|
||||
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
|
||||
|
||||
bkMeta := make(map[string]string)
|
||||
cacheMeta := make(map[string]string)
|
||||
bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
|
||||
cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
|
||||
for k, v := range bkObjectInfo.UserDefined {
|
||||
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
@@ -166,13 +165,13 @@ func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects
|
||||
|
||||
// construct a metadata k-v map
|
||||
func getMetadata(objInfo ObjectInfo) map[string]string {
|
||||
metadata := make(map[string]string)
|
||||
metadata := make(map[string]string, len(objInfo.UserDefined)+4)
|
||||
metadata["etag"] = objInfo.ETag
|
||||
metadata["content-type"] = objInfo.ContentType
|
||||
if objInfo.ContentEncoding != "" {
|
||||
metadata["content-encoding"] = objInfo.ContentEncoding
|
||||
}
|
||||
if objInfo.Expires != timeSentinel {
|
||||
if !objInfo.Expires.Equal(timeSentinel) {
|
||||
metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
|
||||
}
|
||||
for k, v := range objInfo.UserDefined {
|
||||
@@ -284,12 +283,6 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
|
||||
// Reaching here implies cache miss
|
||||
c.cacheStats.incMiss()
|
||||
|
||||
// Since we got here, we are serving the request from backend,
|
||||
// and also adding the object to the cache.
|
||||
if dcache.diskUsageHigh() {
|
||||
dcache.triggerGC <- struct{}{} // this is non-blocking
|
||||
}
|
||||
|
||||
bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
|
||||
|
||||
if bkErr != nil {
|
||||
@@ -306,7 +299,9 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
|
||||
if cacheErr == nil {
|
||||
bkReader.ObjInfo.CacheLookupStatus = CacheHit
|
||||
}
|
||||
if !dcache.diskAvailable(objInfo.Size) {
|
||||
|
||||
// Check if we can add it without exceeding total cache size.
|
||||
if !dcache.diskSpaceAvailable(objInfo.Size) {
|
||||
return bkReader, bkErr
|
||||
}
|
||||
|
||||
@@ -317,16 +312,18 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
|
||||
rs = nil
|
||||
}
|
||||
// fill cache in the background for range GET requests
|
||||
bReader, bErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
|
||||
bReader, bErr := c.GetObjectNInfoFn(GlobalContext, bucket, object, rs, h, lockType, opts)
|
||||
if bErr != nil {
|
||||
return
|
||||
}
|
||||
defer bReader.Close()
|
||||
oi, _, _, err := dcache.statRange(ctx, bucket, object, rs)
|
||||
oi, _, _, err := dcache.statRange(GlobalContext, bucket, object, rs)
|
||||
// avoid cache overwrite if another background routine filled cache
|
||||
if err != nil || oi.ETag != bReader.ObjInfo.ETag {
|
||||
// use a new context to avoid locker prematurely timing out operation when the GetObjectNInfo returns.
|
||||
dcache.Put(context.Background(), bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
|
||||
dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{
|
||||
UserDefined: getMetadata(bReader.ObjInfo),
|
||||
}, false)
|
||||
return
|
||||
}
|
||||
}()
|
||||
@@ -336,8 +333,13 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
|
||||
// Initialize pipe.
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
teeReader := io.TeeReader(bkReader, pipeWriter)
|
||||
userDefined := getMetadata(bkReader.ObjInfo)
|
||||
go func() {
|
||||
putErr := dcache.Put(ctx, bucket, object, io.LimitReader(pipeReader, bkReader.ObjInfo.Size), bkReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bkReader.ObjInfo)}, false)
|
||||
putErr := dcache.Put(ctx, bucket, object,
|
||||
io.LimitReader(pipeReader, bkReader.ObjInfo.Size),
|
||||
bkReader.ObjInfo.Size, nil, ObjectOptions{
|
||||
UserDefined: userDefined,
|
||||
}, false)
|
||||
// close the write end of the pipe, so the error gets
|
||||
// propagated to getObjReader
|
||||
pipeWriter.CloseWithError(putErr)
|
||||
@@ -606,9 +608,10 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
|
||||
}
|
||||
|
||||
// fetch from backend if there is no space on cache drive
|
||||
if !dcache.diskAvailable(size) {
|
||||
if !dcache.diskSpaceAvailable(size) {
|
||||
return putObjectFn(ctx, bucket, object, r, opts)
|
||||
}
|
||||
|
||||
if opts.ServerSideEncryption != nil {
|
||||
dcache.Delete(ctx, bucket, object)
|
||||
return putObjectFn(ctx, bucket, object, r, opts)
|
||||
@@ -634,15 +637,15 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
|
||||
if err == nil {
|
||||
go func() {
|
||||
// fill cache in the background
|
||||
bReader, bErr := c.GetObjectNInfoFn(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
|
||||
bReader, bErr := c.GetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
|
||||
if bErr != nil {
|
||||
return
|
||||
}
|
||||
defer bReader.Close()
|
||||
oi, _, err := dcache.Stat(ctx, bucket, object)
|
||||
oi, _, err := dcache.Stat(GlobalContext, bucket, object)
|
||||
// avoid cache overwrite if another background routine filled cache
|
||||
if err != nil || oi.ETag != bReader.ObjInfo.ETag {
|
||||
dcache.Put(ctx, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
|
||||
dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -715,7 +718,9 @@ func (c *cacheObjects) gc(ctx context.Context) {
|
||||
}
|
||||
for _, dcache := range c.cache {
|
||||
if dcache != nil {
|
||||
dcache.triggerGC <- struct{}{}
|
||||
// Check if there is disk.
|
||||
// Will queue a GC scan if at high watermark.
|
||||
dcache.diskSpaceAvailable(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ func TestCacheMetadataObjInfo(t *testing.T) {
|
||||
if objInfo.Size != 0 {
|
||||
t.Fatal("Unexpected object info value for Size", objInfo.Size)
|
||||
}
|
||||
if objInfo.ModTime != timeSentinel {
|
||||
if !objInfo.ModTime.Equal(timeSentinel) {
|
||||
t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
|
||||
}
|
||||
if objInfo.IsDir {
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -26,7 +27,8 @@ const (
|
||||
dynamicTimeoutIncreaseThresholdPct = 0.33 // Upper threshold for failures in order to increase timeout
|
||||
dynamicTimeoutDecreaseThresholdPct = 0.10 // Lower threshold for failures in order to decrease timeout
|
||||
dynamicTimeoutLogSize = 16
|
||||
maxDuration = time.Duration(1<<63 - 1)
|
||||
maxDuration = math.MaxInt64
|
||||
maxDynamicTimeout = 24 * time.Hour // Never set timeout bigger than this.
|
||||
)
|
||||
|
||||
// timeouts that are dynamically adapted based on actual usage results
|
||||
@@ -40,6 +42,12 @@ type dynamicTimeout struct {
|
||||
|
||||
// newDynamicTimeout returns a new dynamic timeout initialized with timeout value
|
||||
func newDynamicTimeout(timeout, minimum time.Duration) *dynamicTimeout {
|
||||
if timeout <= 0 || minimum <= 0 {
|
||||
panic("newDynamicTimeout: negative or zero timeout")
|
||||
}
|
||||
if minimum > timeout {
|
||||
minimum = timeout
|
||||
}
|
||||
return &dynamicTimeout{timeout: int64(timeout), minimum: int64(minimum)}
|
||||
}
|
||||
|
||||
@@ -61,60 +69,73 @@ func (dt *dynamicTimeout) LogFailure() {
|
||||
|
||||
// logEntry stores a log entry
|
||||
func (dt *dynamicTimeout) logEntry(duration time.Duration) {
|
||||
if duration < 0 {
|
||||
return
|
||||
}
|
||||
entries := int(atomic.AddInt64(&dt.entries, 1))
|
||||
index := entries - 1
|
||||
if index < dynamicTimeoutLogSize {
|
||||
dt.mutex.Lock()
|
||||
dt.log[index] = duration
|
||||
|
||||
// We leak entries while we copy
|
||||
if entries == dynamicTimeoutLogSize {
|
||||
|
||||
// Make copy on stack in order to call adjust()
|
||||
logCopy := [dynamicTimeoutLogSize]time.Duration{}
|
||||
copy(logCopy[:], dt.log[:])
|
||||
|
||||
// reset log entries
|
||||
atomic.StoreInt64(&dt.entries, 0)
|
||||
dt.mutex.Unlock()
|
||||
|
||||
dt.adjust(logCopy)
|
||||
return
|
||||
}
|
||||
dt.mutex.Unlock()
|
||||
}
|
||||
if entries == dynamicTimeoutLogSize {
|
||||
dt.mutex.Lock()
|
||||
|
||||
// Make copy on stack in order to call adjust()
|
||||
logCopy := [dynamicTimeoutLogSize]time.Duration{}
|
||||
copy(logCopy[:], dt.log[:])
|
||||
|
||||
// reset log entries
|
||||
atomic.StoreInt64(&dt.entries, 0)
|
||||
|
||||
dt.mutex.Unlock()
|
||||
|
||||
dt.adjust(logCopy)
|
||||
}
|
||||
}
|
||||
|
||||
// adjust changes the value of the dynamic timeout based on the
|
||||
// previous results
|
||||
func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
|
||||
|
||||
failures, average := 0, int64(0)
|
||||
for i := 0; i < len(entries); i++ {
|
||||
if entries[i] == maxDuration {
|
||||
failures, max := 0, time.Duration(0)
|
||||
for _, dur := range entries[:] {
|
||||
if dur == maxDuration {
|
||||
failures++
|
||||
} else {
|
||||
average += int64(entries[i])
|
||||
} else if dur > max {
|
||||
max = dur
|
||||
}
|
||||
}
|
||||
if failures < len(entries) {
|
||||
average /= int64(len(entries) - failures)
|
||||
}
|
||||
|
||||
timeOutHitPct := float64(failures) / float64(len(entries))
|
||||
failPct := float64(failures) / float64(len(entries))
|
||||
|
||||
if timeOutHitPct > dynamicTimeoutIncreaseThresholdPct {
|
||||
if failPct > dynamicTimeoutIncreaseThresholdPct {
|
||||
// We are hitting the timeout too often, so increase the timeout by 25%
|
||||
timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
|
||||
atomic.StoreInt64(&dt.timeout, timeout)
|
||||
} else if timeOutHitPct < dynamicTimeoutDecreaseThresholdPct {
|
||||
// We are hitting the timeout relatively few times, so decrease the timeout
|
||||
average = average * 125 / 100 // Add buffer of 25% on top of average
|
||||
|
||||
timeout := (atomic.LoadInt64(&dt.timeout) + int64(average)) / 2 // Middle between current timeout and average success
|
||||
// Set upper cap.
|
||||
if timeout > int64(maxDynamicTimeout) {
|
||||
timeout = int64(maxDynamicTimeout)
|
||||
}
|
||||
// Safety, shouldn't happen
|
||||
if timeout < dt.minimum {
|
||||
timeout = dt.minimum
|
||||
}
|
||||
atomic.StoreInt64(&dt.timeout, timeout)
|
||||
} else if failPct < dynamicTimeoutDecreaseThresholdPct {
|
||||
// We are hitting the timeout relatively few times,
|
||||
// so decrease the timeout towards 25 % of maximum time spent.
|
||||
max = max * 125 / 100
|
||||
|
||||
timeout := atomic.LoadInt64(&dt.timeout)
|
||||
if max < time.Duration(timeout) {
|
||||
// Move 50% toward the max.
|
||||
timeout = (int64(max) + timeout) / 2
|
||||
}
|
||||
if timeout < dt.minimum {
|
||||
timeout = dt.minimum
|
||||
}
|
||||
atomic.StoreInt64(&dt.timeout, timeout)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ package cmd
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -98,7 +100,7 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
|
||||
adjustedAgain := timeout.Timeout()
|
||||
|
||||
if initial <= adjusted || adjusted <= adjustedAgain {
|
||||
t.Errorf("Failure to decrease timeout multiple times")
|
||||
t.Errorf("Failure to decrease timeout multiple times, initial: %v, adjusted: %v, again: %v", initial, adjusted, adjustedAgain)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,6 +125,30 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutConcurrent(t *testing.T) {
|
||||
// Race test.
|
||||
timeout := newDynamicTimeout(time.Second, time.Millisecond)
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < runtime.GOMAXPROCS(0); i++ {
|
||||
wg.Add(1)
|
||||
rng := rand.New(rand.NewSource(int64(i)))
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < 100; i++ {
|
||||
timeout.LogFailure()
|
||||
for j := 0; j < 100; j++ {
|
||||
timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
|
||||
}
|
||||
to := timeout.Timeout()
|
||||
if to < time.Millisecond || to > time.Second {
|
||||
panic(to)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutHitMinimum(t *testing.T) {
|
||||
|
||||
const minimum = 30 * time.Second
|
||||
@@ -168,7 +194,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rand.Seed(0)
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
@@ -188,7 +214,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rand.Seed(0)
|
||||
|
||||
initial := timeout.Timeout()

@@ -388,12 +388,7 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
		object:            object,
		customerKeyHeader: h.Get(crypto.SSECKey),
		copySource:        copySource,
-	}
-
-	w.metadata = map[string]string{}
-	// Copy encryption metadata for internal use.
-	for k, v := range oi.UserDefined {
-		w.metadata[k] = v
+		metadata:          cloneMSS(oi.UserDefined),
	}

	if w.copySource {

@@ -432,10 +427,7 @@ type DecryptBlocksReader struct {
}

func (d *DecryptBlocksReader) buildDecrypter(partID int) error {
-	m := make(map[string]string)
-	for k, v := range d.metadata {
-		m[k] = v
-	}
+	m := cloneMSS(d.metadata)
	// Initialize the first decrypter; new decrypters will be
	// initialized in Read() operation as needed.
	var key []byte
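
Both call sites replace a hand-rolled copy loop with cloneMSS, whose definition is not part of this diff. A minimal equivalent, inferred only from these call sites:

```go
package cmd

// cloneMSS returns a shallow copy of a map[string]string — an assumed
// equivalent of the helper used above, reconstructed from its call sites.
func cloneMSS(v map[string]string) map[string]string {
	r := make(map[string]string, len(v))
	for k, s := range v {
		r[k] = s
	}
	return r
}
```

Centralizing the idiom also keeps every copy pre-sized via len(v), avoiding rehashing during the copy.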

@@ -605,12 +597,14 @@ func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool)
	if crypto.IsMultiPart(objInfo.UserDefined) {
		return objInfo.ETag
	}

	if crypto.SSECopy.IsRequested(headers) {
		key, err = crypto.SSECopy.ParseHTTP(headers)
		if err != nil {
			return objInfo.ETag
		}
	}

	// As per AWS S3 Spec, ETag for SSE-C encrypted objects need not be MD5Sum of the data.
	// Since server side copy with same source and dest just replaces the ETag, we save
	// encrypted content MD5Sum as ETag for both SSE-C and SSE-S3, we standardize the ETag

@@ -664,7 +658,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
	if rs == nil {
		// No range, so offsets refer to the whole object.
-		return 0, int64(o.Size), 0, 0, 0, nil
+		return 0, o.Size, 0, 0, 0, nil
	}

	// Assemble slice of (decrypted) part sizes in `sizes`

@@ -373,7 +373,7 @@ func TestGetDecryptedRange(t *testing.T) {
	sum := int64(0)
	for i, s := range sizes {
		r[i].Number = i
-		r[i].Size = int64(getEncSize(s))
+		r[i].Size = getEncSize(s)
		sum += r[i].Size
	}
	return ObjectInfo{
@@ -329,28 +329,28 @@ var (
// CreateServerEndpoints - validates and creates new endpoints from input args, supports
// both ellipses and without ellipses transparently.
func createServerEndpoints(serverAddr string, args ...string) (
-	endpointZones EndpointZones, setDriveCount int,
-	setupType SetupType, err error) {
+	endpointZones EndpointZones, setupType SetupType, err error) {

	if len(args) == 0 {
-		return nil, -1, -1, errInvalidArgument
+		return nil, -1, errInvalidArgument
	}

+	var setDriveCount int
	if v := env.Get(EnvErasureSetDriveCount, ""); v != "" {
		setDriveCount, err = strconv.Atoi(v)
		if err != nil {
-			return nil, -1, -1, config.ErrInvalidErasureSetSize(err)
+			return nil, -1, config.ErrInvalidErasureSetSize(err)
		}
	}

	if !ellipses.HasEllipses(args...) {
		setArgs, err := GetAllSets(uint64(setDriveCount), args...)
		if err != nil {
-			return nil, -1, -1, err
+			return nil, -1, err
		}
		endpointList, newSetupType, err := CreateEndpoints(serverAddr, false, setArgs...)
		if err != nil {
-			return nil, -1, -1, err
+			return nil, -1, err
		}
		endpointZones = append(endpointZones, ZoneEndpoints{
			SetCount:     len(setArgs),

@@ -358,7 +358,7 @@ func createServerEndpoints(serverAddr string, args ...string) (
			Endpoints:    endpointList,
		})
		setupType = newSetupType
-		return endpointZones, len(setArgs[0]), setupType, nil
+		return endpointZones, setupType, nil
	}

	var prevSetupType SetupType

@@ -366,25 +366,25 @@ func createServerEndpoints(serverAddr string, args ...string) (
	for _, arg := range args {
		setArgs, err := GetAllSets(uint64(setDriveCount), arg)
		if err != nil {
-			return nil, -1, -1, err
+			return nil, -1, err
		}
		var endpointList Endpoints
		endpointList, setupType, err = CreateEndpoints(serverAddr, foundPrevLocal, setArgs...)
		if err != nil {
-			return nil, -1, -1, err
+			return nil, -1, err
		}
		if setDriveCount != 0 && setDriveCount != len(setArgs[0]) {
-			return nil, -1, -1, fmt.Errorf("All zones should have same drive per set ratio - expected %d, got %d", setDriveCount, len(setArgs[0]))
+			return nil, -1, fmt.Errorf("All zones should have same drive per set ratio - expected %d, got %d", setDriveCount, len(setArgs[0]))
		}
		if prevSetupType != UnknownSetupType && prevSetupType != setupType {
-			return nil, -1, -1, fmt.Errorf("All zones should be of the same setup-type to maintain the original SLA expectations - expected %s, got %s", prevSetupType, setupType)
+			return nil, -1, fmt.Errorf("All zones should be of the same setup-type to maintain the original SLA expectations - expected %s, got %s", prevSetupType, setupType)
		}
		if err = endpointZones.Add(ZoneEndpoints{
			SetCount:     len(setArgs),
			DrivesPerSet: len(setArgs[0]),
			Endpoints:    endpointList,
		}); err != nil {
-			return nil, -1, -1, err
+			return nil, -1, err
		}
		foundPrevLocal = endpointList.atleastOneEndpointLocal()
		if setDriveCount == 0 {

@@ -393,5 +393,5 @@ func createServerEndpoints(serverAddr string, args ...string) (
		prevSetupType = setupType
	}

-	return endpointZones, setDriveCount, setupType, nil
+	return endpointZones, setupType, nil
}
@@ -56,7 +56,7 @@ func TestCreateServerEndpoints(t *testing.T) {
	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
-			_, _, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
+			_, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
			if err != nil && testCase.success {
				t.Errorf("Expected success but failed instead %s", err)
			}

@@ -24,6 +24,7 @@ import (
	"net/url"
	"path"
	"path/filepath"
+	"reflect"
	"runtime"
	"strconv"
	"strings"

@@ -193,7 +194,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
}

// ZoneEndpoints represent endpoints in a given zone
-// along with its setCount and drivesPerSet.
+// along with its setCount and setDriveCount.
type ZoneEndpoints struct {
	SetCount     int
	DrivesPerSet int

@@ -203,6 +204,21 @@ type ZoneEndpoints struct {
// EndpointZones - list of list of endpoints
type EndpointZones []ZoneEndpoints

+// GetLocalZoneIdx returns the zone which endpoint belongs to locally.
+// if ep is remote this code will return -1 zoneIndex
+func (l EndpointZones) GetLocalZoneIdx(ep Endpoint) int {
+	for i, zep := range l {
+		for _, cep := range zep.Endpoints {
+			if cep.IsLocal && ep.IsLocal {
+				if reflect.DeepEqual(cep, ep) {
+					return i
+				}
+			}
+		}
+	}
+	return -1
+}
+
// Add add zone endpoints
func (l *EndpointZones) Add(zeps ZoneEndpoints) error {
	existSet := set.NewStringSet()
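
A hypothetical consumer of the new helper, using the types from this diff; the -1 sentinel maps naturally onto an ok-style boolean:

```go
package cmd

// localZoneOf is an illustrative wrapper (not part of the diff): it
// reports whether ep is served by a local zone and, if so, which one.
func localZoneOf(zones EndpointZones, ep Endpoint) (idx int, ok bool) {
	idx = zones.GetLocalZoneIdx(ep)
	return idx, idx >= 0
}
```

Note the helper compares endpoints with reflect.DeepEqual only after both sides report IsLocal, so a remote ep short-circuits to -1 without any deep comparison.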

@@ -753,9 +769,16 @@ func GetProxyEndpoints(endpointZones EndpointZones) ([]ProxyEndpoint, error) {
					RootCAs: globalRootCAs,
				}
			}

+			tr := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)()
+			// Allow more requests to be in flight with higher response header timeout.
+			tr.ResponseHeaderTimeout = 30 * time.Minute
+			tr.MaxIdleConns = 64
+			tr.MaxIdleConnsPerHost = 64
+
			proxyEps = append(proxyEps, ProxyEndpoint{
				Endpoint:  endpoint,
-				Transport: newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)(),
+				Transport: tr,
			})
		}
	}
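
Hoisting the transport into tr lets the proxy path tune it before reuse. A standalone sketch of the same knobs on a plain net/http transport (newCustomHTTPTransport and rest.DefaultRESTTimeout are MinIO internals, so this clones http.DefaultTransport instead):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// newProxyTransport applies the tuning from the diff: a long response
// header timeout keeps slow proxied requests in flight, and the idle
// connection limits allow up to 64 reusable keep-alive connections per
// host instead of net/http's default of 2 (DefaultMaxIdleConnsPerHost).
func newProxyTransport() *http.Transport {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.ResponseHeaderTimeout = 30 * time.Minute
	tr.MaxIdleConns = 64
	tr.MaxIdleConnsPerHost = 64
	return tr
}

func main() {
	client := &http.Client{Transport: newProxyTransport()}
	fmt.Println(client.Timeout) // 0s: per-phase timeouts live on the transport
}
```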

@@ -49,7 +49,7 @@ func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket stri
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
-				if err := storageDisks[index].MakeVol(bucket); err != nil {
+				if err := storageDisks[index].MakeVol(ctx, bucket); err != nil {
					if err != errVolumeExists {
						logger.LogIf(ctx, err)
					}
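
This and the following hunks are mechanical: every StorageAPI volume operation gains a context.Context as its first parameter. An illustrative sketch of the resulting interface shape, trimmed to the methods touched here (VolInfo is the package's existing type):

```go
package cmd

import "context"

// storageVolAPI is an assumed subset of StorageAPI after this change:
// each volume call threads a context so request cancellation and
// deadlines reach the disk layer.
type storageVolAPI interface {
	MakeVol(ctx context.Context, volume string) error
	StatVol(ctx context.Context, volume string) (VolInfo, error)
	ListVols(ctx context.Context) ([]VolInfo, error)
	DeleteVol(ctx context.Context, volume string, forceDelete bool) error
}
```

Note that undoDeleteBucket below passes context.Background(), since its signature carries no caller context to propagate.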

@@ -75,7 +75,7 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {
		}
		index := index
		g.Go(func() error {
-			_ = storageDisks[index].MakeVol(bucket)
+			_ = storageDisks[index].MakeVol(context.Background(), bucket)
			return nil
		}, index)
	}

@@ -92,7 +92,7 @@ func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (
			bucketErrs = append(bucketErrs, errDiskNotFound)
			continue
		}
-		volInfo, serr := disk.StatVol(bucketName)
+		volInfo, serr := disk.StatVol(ctx, bucketName)
		if serr == nil {
			return BucketInfo(volInfo), nil
		}

@@ -129,12 +129,11 @@ func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketI
			continue
		}
		var volsInfo []VolInfo
-		volsInfo, err = disk.ListVols()
+		volsInfo, err = disk.ListVols(ctx)
		if err == nil {
			// NOTE: The assumption here is that volumes across all disks in
			// readQuorum have consistent view i.e they all have same number
-			// of buckets. This is essentially not verified since healing
-			// should take care of this.
+			// of buckets.
			var bucketsInfo []BucketInfo
			for _, volInfo := range volsInfo {
				if isReservedOrInvalidBucket(volInfo.Name, true) {
@@ -182,10 +181,10 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs
	for index, err := range dErrs {
		if err == errVolumeNotEmpty {
			// Attempt to delete bucket again.
-			if derr := storageDisks[index].DeleteVol(bucket, false); derr == errVolumeNotEmpty {
+			if derr := storageDisks[index].DeleteVol(ctx, bucket, false); derr == errVolumeNotEmpty {
				_ = cleanupDir(ctx, storageDisks[index], bucket, "")

-				_ = storageDisks[index].DeleteVol(bucket, false)
+				_ = storageDisks[index].DeleteVol(ctx, bucket, false)

				// Cleanup all the previously incomplete multiparts.
				_ = cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)

@@ -205,7 +204,7 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
-				if err := storageDisks[index].DeleteVol(bucket, forceDelete); err != nil {
+				if err := storageDisks[index].DeleteVol(ctx, bucket, forceDelete); err != nil {
					return err
				}
				err := cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)

@@ -244,7 +243,10 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
	// If we reduce quorum to nil, means we have deleted buckets properly
	// on some servers in quorum, we should look for volumeNotEmpty errors
	// and delete those buckets as well.
-	deleteDanglingBucket(ctx, storageDisks, dErrs, bucket)
+	//
+	// let this call succeed, even if client cancels the context
+	// this is to ensure that we don't leave any stale content
+	deleteDanglingBucket(context.Background(), storageDisks, dErrs, bucket)

	return nil
}
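
Swapping ctx for context.Background() here deliberately detaches cleanup from the request lifetime: a client disconnect should not abort the dangling-bucket sweep. The general pattern, in isolation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// cleanup must finish even if the caller's ctx is already canceled, so
// callers that need completion guarantees invoke it with a fresh context —
// the same pattern the diff applies to deleteDanglingBucket above.
func cleanup(ctx context.Context) {
	select {
	case <-ctx.Done():
		fmt.Println("skipped:", ctx.Err())
	case <-time.After(10 * time.Millisecond):
		fmt.Println("cleanup completed")
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()                      // client walked away
	cleanup(ctx)                  // bails out immediately
	cleanup(context.Background()) // detached: still runs to completion
}
```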