Compare commits
365 Commits
Comparing RELEASE.20 ... RELEASE.20
.github/workflows/codeql.yml (vendored, 51 lines changed)
@@ -1,51 +0,0 @@
name: "Code scanning - action"

on:
  push:
  pull_request:
  schedule:
    - cron: '0 19 * * 0'

jobs:
  CodeQL-Build:

    # CodeQL runs on ubuntu-latest and windows-latest
    runs-on: ubuntu-latest

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2
      with:
        # We must fetch at least the immediate parents so that if this is
        # a pull request then we can checkout the head.
        fetch-depth: 2

    # If this run was triggered by a pull request event, then checkout
    # the head of the pull request instead of the merge commit.
    - run: git checkout HEAD^2
      if: ${{ github.event_name == 'pull_request' }}

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: go, javascript

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    # and modify them (or add more) to build your code if your project
    # uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
.github/workflows/go.yml (vendored, 2 lines changed)
@@ -37,6 +37,8 @@ jobs:
          GO111MODULE: on
          MINIO_CI_CD: 1
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          sudo apt-get install devscripts shellcheck
          nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
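The `nancy_version` step above resolves the latest nancy release tag by following the redirect behind `releases/latest` and stripping the tag out of the final URL. A minimal Go sketch of the same lookup (illustrative only, not part of the workflow):

```go
// Sketch: resolve the latest nancy release tag by following the GitHub
// "releases/latest" redirect, mirroring the curl -w "%{url_effective}" | sed
// pipeline in the workflow step above.
package main

import (
	"fmt"
	"net/http"
	"path"
)

func latestNancyTag() (string, error) {
	// http.Get follows redirects; the final URL ends in .../releases/tag/<version>.
	resp, err := http.Get("https://github.com/sonatype-nexus-community/nancy/releases/latest")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	return path.Base(resp.Request.URL.Path), nil
}

func main() {
	tag, err := latestNancyTag()
	if err != nil {
		panic(err)
	}
	fmt.Println(tag) // e.g. a tag such as v1.x.y
}
```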
@@ -23,5 +23,11 @@ issues:
  exclude:
    - should have a package comment
    - error strings should not be capitalized or end with punctuation or a newline

run:
  skip-dirs:
    - pkg/rpc
    - pkg/argon2

service:
  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
Dockerfile (15 lines changed)
@@ -1,4 +1,4 @@
FROM golang:1.14-alpine as builder
FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

@@ -11,22 +11,25 @@ RUN \
    git clone https://github.com/minio/minio && cd minio && \
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /third_party/
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
@@ -1,34 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-arm as qemu
FROM arm32v7/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-arm-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
    apk update && apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' minisign && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm/minio.minisig -o /usr/bin/minio.minisig && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/verify-minio.sh -o /usr/bin/verify-minio.sh && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
@@ -1,35 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-aarch64 as qemu
FROM arm64v8/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-aarch64-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
    apk update && apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' minisign && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm64/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm64/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-arm64/minio.minisig -o /usr/bin/minio.minisig && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/verify-minio.sh -o /usr/bin/verify-minio.sh && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
Dockerfile.cicd (new file, 40 lines)
@@ -0,0 +1,40 @@
FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio", "server", "/data"]
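The `MINIO_*_FILE` variables set throughout these Dockerfiles name files (typically Docker or Kubernetes secrets mounted into the container) that hold the actual credentials. A hypothetical Go sketch of how such a variable is commonly consumed; the helper and fallback variable names are assumptions for illustration, not MinIO's actual code:

```go
// Illustrative sketch only: read a credential from the file named by a
// *_FILE variable (e.g. MINIO_ACCESS_KEY_FILE), falling back to a plain
// environment variable when no file is configured.
package main

import (
	"fmt"
	"os"
	"strings"
)

func credentialFromEnv(fileEnv, plainEnv string) (string, error) {
	if p := os.Getenv(fileEnv); p != "" {
		b, err := os.ReadFile(p) // the secret file mounted into the container
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(b)), nil
	}
	return os.Getenv(plainEnv), nil
}

func main() {
	accessKey, err := credentialFromEnv("MINIO_ACCESS_KEY_FILE", "MINIO_ACCESS_KEY")
	if err != nil {
		panic(err)
	}
	fmt.Println("access key loaded:", accessKey != "")
}
```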
@@ -1,22 +1,26 @@
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
ENV MINIO_UPDATE=off \
    MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
RUN \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000
@@ -1,4 +1,4 @@
FROM ubuntu
FROM ubuntu:20.04

LABEL maintainer="MinIO Inc <dev@min.io>"
@@ -1,34 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-ppc64le as qemu
FROM ppc64le/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-ppc64le-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
    apk update && apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' minisign && \
    curl -s -q https://dl.min.io/server/minio/release/linux-ppc64le/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-ppc64le/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-ppc64le/minio.minisig -o /usr/bin/minio.minisig && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/verify-minio.sh -o /usr/bin/verify-minio.sh && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
@@ -1,26 +1,38 @@
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

LABEL maintainer="MinIO Inc <dev@min.io>"
ARG TARGETARCH

LABEL name="MinIO" \
      vendor="MinIO Inc <dev@min.io>" \
      maintainer="MinIO Inc <dev@min.io>" \
      version="RELEASE.2020-11-25T22-36-25Z" \
      release="RELEASE.2020-11-25T22-36-25Z" \
      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
    apk update && apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' minisign && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl -s -q https://dl.min.io/server/minio/release/linux-amd64/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-amd64/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-amd64/minio.minisig -o /usr/bin/minio.minisig && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/verify-minio.sh -o /usr/bin/verify-minio.sh && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
    microdnf install minisign --nodocs && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
    microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000
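With `ARG TARGETARCH`, the download URLs above are parameterized per build platform instead of hard-coding `linux-amd64`. The same mapping, sketched in Go from `runtime.GOARCH` (illustrative only):

```go
// Illustrative sketch: build the per-architecture download URL the Dockerfile
// derives from ${TARGETARCH}, here keyed off runtime.GOARCH instead.
package main

import (
	"fmt"
	"runtime"
)

func releaseURL(arch string) string {
	// dl.min.io publishes per-architecture server builds under linux-<arch>/.
	return fmt.Sprintf("https://dl.min.io/server/minio/release/linux-%s/minio", arch)
}

func main() {
	fmt.Println(releaseURL(runtime.GOARCH)) // e.g. .../linux-amd64/minio
}
```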
@@ -1,34 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-s390x as qemu
FROM s390x/alpine:3.12

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=qemu /usr/bin/qemu-s390x-static /usr/bin

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
    apk update && apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' minisign && \
    curl -s -q https://dl.min.io/server/minio/release/linux-s390x/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-s390x/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-s390x/minio.minisig -o /usr/bin/minio.minisig && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/verify-minio.sh -o /usr/bin/verify-minio.sh && \
    curl -s -q https://raw.githubusercontent.com/minio/minio/master/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
    /usr/bin/verify-minio.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
Makefile (14 lines changed)
@@ -18,11 +18,17 @@ getdeps:
	@mkdir -p ${GOPATH}/bin
	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
	@which msgp 1>/dev/null || (echo "Installing msgp" && GO111MODULE=off go get github.com/tinylib/msgp)
	@which stringer 1>/dev/null || (echo "Installing stringer" && GO111MODULE=off go get golang.org/x/tools/cmd/stringer)

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps fmt lint ruleguard
verifiers: getdeps fmt lint ruleguard check-gen

check-gen:
	@go generate ./... >/dev/null
	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

fmt:
	@echo "Running $@ check"
@@ -51,7 +57,7 @@ test-race: verifiers build
# Verify minio binary
verify:
	@echo "Verifying build with race"
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-build.sh)

# Verify healing of disks with minio binary
@@ -65,6 +71,10 @@ build: checks
	@echo "Building minio binary to './minio'"
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

hotfix: LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
	sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'))
hotfix: install

docker: checks
	@echo "Building minio docker image '$(TAG)'"
	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
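The `hotfix` target's `sed` expression rewrites the annotated release tag (e.g. `RELEASE.2020-11-25T22-36-25Z`) back into the RFC3339 timestamp that `gen-ldflags.go` expects as its argument. The same rewrite, sketched in Go:

```go
// Sketch of the tag rewrite the hotfix target does with sed: turn a MinIO
// release tag back into the RFC3339 time it was derived from.
package main

import (
	"fmt"
	"regexp"
)

var relTagRe = regexp.MustCompile(`^RELEASE\.(\d+)-(\d+)-(\d+)T(\d+)-(\d+)-(\d+)Z$`)

func tagToRFC3339(tag string) string {
	return relTagRe.ReplaceAllString(tag, "${1}-${2}-${3}T${4}:${5}:${6}Z")
}

func main() {
	fmt.Println(tagToRFC3339("RELEASE.2020-11-25T22-36-25Z")) // 2020-11-25T22:36:25Z
}
```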
@@ -88,7 +88,7 @@ service minio start
```

## Install from Source
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.14](https://golang.org/dl/#stable)
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
@@ -150,7 +150,7 @@ service iptables restart
```

## Test using MinIO Browser
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully.
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.



@@ -177,7 +177,7 @@ mc admin update <minio alias, e.g., myminio>
- `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
- `mc admin update` is disabled in kubernetes/container environments, container environments provide their own mechanisms to rollout of updates.
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)x
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
|
||||
const { fetchServerInfo } = this.props
|
||||
fetchServerInfo()
|
||||
}
|
||||
fullScreen(e) {
|
||||
e.preventDefault()
|
||||
let el = document.documentElement
|
||||
if (el.requestFullscreen) {
|
||||
el.requestFullscreen()
|
||||
}
|
||||
if (el.mozRequestFullScreen) {
|
||||
el.mozRequestFullScreen()
|
||||
}
|
||||
if (el.webkitRequestFullscreen) {
|
||||
el.webkitRequestFullscreen()
|
||||
}
|
||||
if (el.msRequestFullscreen) {
|
||||
el.msRequestFullscreen()
|
||||
}
|
||||
}
|
||||
logout(e) {
|
||||
e.preventDefault()
|
||||
web.Logout()
|
||||
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
|
||||
<i className="fas fa-bars" />
|
||||
</Dropdown.Toggle>
|
||||
<Dropdown.Menu className="dropdown-menu-right">
|
||||
<li>
|
||||
<a href="" onClick={this.showChangePassword.bind(this)}>
|
||||
Change Password <i className="fas fa-cog" />
|
||||
</a>
|
||||
{this.state.showChangePasswordModal && (
|
||||
<ChangePasswordModal
|
||||
serverInfo={serverInfo}
|
||||
hideChangePassword={this.hideChangePassword.bind(this)}
|
||||
/>
|
||||
)}
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://docs.min.io/?ref=ob">
|
||||
Documentation <i className="fas fa-book" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://github.com/minio/minio">
|
||||
GitHub <i className="fab fa-github" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="" onClick={this.fullScreen}>
|
||||
Fullscreen <i className="fas fa-expand" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://docs.min.io/">
|
||||
Documentation <i className="fas fa-book" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target="_blank" href="https://slack.min.io">
|
||||
Ask for help <i className="fas fa-question-circle" />
|
||||
<a target="_blank" href="https://min.io/pricing?ref=ob">
|
||||
Get Support <i className="fas fa-question-circle" />
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
|
||||
/>
|
||||
)}
|
||||
</li>
|
||||
<li>
|
||||
<a href="" onClick={this.showChangePassword.bind(this)}>
|
||||
Change Password <i className="fas fa-cog" />
|
||||
</a>
|
||||
{this.state.showChangePasswordModal && (
|
||||
<ChangePasswordModal
|
||||
serverInfo={serverInfo}
|
||||
hideChangePassword={this.hideChangePassword.bind(this)}
|
||||
/>
|
||||
)}
|
||||
</li>
|
||||
<li>
|
||||
<a href="" id="logout" onClick={this.logout}>
|
||||
Sign Out <i className="fas fa-sign-out-alt" />
|
||||
Logout <i className="fas fa-sign-out-alt" />
|
||||
</a>
|
||||
</li>
|
||||
</Dropdown.Menu>
|
||||
|
||||
@@ -15,6 +15,7 @@
 */

import React from "react"
import ObjectsSearch from "../objects/ObjectsSearch"
import Path from "../objects/Path"
import StorageInfo from "./StorageInfo"
import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
    <header className="fe-header">
      <Path />
      {loggedIn && <StorageInfo />}
      {loggedIn && <ObjectsSearch />}
      <ul className="feh-actions">
        {loggedIn ? (
          <BrowserDropdown />
@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
export const getFilteredBuckets = createSelector(
  bucketsSelector,
  bucketsFilterSelector,
  (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
  (buckets, filter) => buckets.filter(
    bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
)

export const getCurrentBucket = state => state.buckets.currentBucket
@@ -18,6 +18,7 @@ import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import InfiniteScroll from "react-infinite-scroller"
|
||||
import ObjectsList from "./ObjectsList"
|
||||
import { getFilteredObjects } from "./selectors"
|
||||
|
||||
export class ObjectsListContainer extends React.Component {
|
||||
constructor(props) {
|
||||
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
|
||||
})
|
||||
}
|
||||
}
|
||||
componentDidUpdate(prevProps) {
|
||||
if (this.props.filter !== prevProps.filter) {
|
||||
this.setState({
|
||||
page: 1
|
||||
})
|
||||
}
|
||||
}
|
||||
loadNextPage() {
|
||||
this.setState(state => {
|
||||
return { page: state.page + 1 }
|
||||
})
|
||||
}
|
||||
render() {
|
||||
const { objects, listLoading } = this.props
|
||||
const { filteredObjects, listLoading } = this.props
|
||||
|
||||
const visibleObjects = objects.slice(0, this.state.page * 100)
|
||||
const visibleObjects = filteredObjects.slice(0, this.state.page * 100)
|
||||
|
||||
return (
|
||||
<div style={{ position: "relative" }}>
|
||||
<InfiniteScroll
|
||||
pageStart={0}
|
||||
loadMore={this.loadNextPage}
|
||||
hasMore={objects.length > visibleObjects.length}
|
||||
hasMore={filteredObjects.length > visibleObjects.length}
|
||||
useWindow={true}
|
||||
initialLoad={false}
|
||||
>
|
||||
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
|
||||
return {
|
||||
currentBucket: state.buckets.currentBucket,
|
||||
currentPrefix: state.objects.currentPrefix,
|
||||
objects: state.objects.list,
|
||||
filteredObjects: getFilteredObjects(state),
|
||||
filter: state.objects.filter,
|
||||
sortBy: state.objects.sortBy,
|
||||
sortOrder: state.objects.sortOrder,
|
||||
listLoading: state.objects.listLoading
|
||||
|
||||
43
browser/app/js/objects/ObjectsSearch.js
Normal file
43
browser/app/js/objects/ObjectsSearch.js
Normal file
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import * as actionsObjects from "./actions"
|
||||
|
||||
export const ObjectsSearch = ({ onChange }) => (
|
||||
<div
|
||||
className="input-group ig-left ig-search-dark"
|
||||
style={{ display: "block" }}
|
||||
>
|
||||
<input
|
||||
className="ig-text"
|
||||
type="input"
|
||||
placeholder="Search Objects..."
|
||||
onChange={e => onChange(e.target.value)}
|
||||
/>
|
||||
<i className="ig-helpers" />
|
||||
</div>
|
||||
)
|
||||
|
||||
const mapDispatchToProps = dispatch => {
|
||||
return {
|
||||
onChange: filter =>
|
||||
dispatch(actionsObjects.setFilter(filter))
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
|
||||
@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"
|
||||
|
||||
describe("ObjectsList", () => {
|
||||
it("should render without crashing", () => {
|
||||
shallow(<ObjectsListContainer objects={[]} />)
|
||||
shallow(<ObjectsListContainer filteredObjects={[]} />)
|
||||
})
|
||||
|
||||
it("should render ObjectsList with objects", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectsListContainer
|
||||
objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
|
||||
filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
|
||||
/>
|
||||
)
|
||||
expect(wrapper.find("ObjectsList").length).toBe(1)
|
||||
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectsListContainer
|
||||
currentBucket="test1"
|
||||
objects={[]}
|
||||
filteredObjects={[]}
|
||||
listLoading={true}
|
||||
/>
|
||||
)
|
||||
|
||||
32
browser/app/js/objects/__tests__/ObjectsSearch.test.js
Normal file
32
browser/app/js/objects/__tests__/ObjectsSearch.test.js
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { shallow } from "enzyme"
|
||||
import { ObjectsSearch } from "../ObjectsSearch"
|
||||
|
||||
describe("ObjectsSearch", () => {
|
||||
it("should render without crashing", () => {
|
||||
shallow(<ObjectsSearch />)
|
||||
})
|
||||
|
||||
it("should call onChange with search text", () => {
|
||||
const onChange = jest.fn()
|
||||
const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
|
||||
wrapper.find("input").simulate("change", { target: { value: "test" } })
|
||||
expect(onChange).toHaveBeenCalledWith("test")
|
||||
})
|
||||
})
|
||||
@@ -23,6 +23,7 @@ describe("objects reducer", () => {
|
||||
const initialState = reducer(undefined, {})
|
||||
expect(initialState).toEqual({
|
||||
list: [],
|
||||
filter: "",
|
||||
listLoading: false,
|
||||
sortBy: "",
|
||||
sortOrder: SORT_ORDER_ASC,
|
||||
|
||||
@@ -36,6 +36,7 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'
|
||||
|
||||
export const SET_LIST = "objects/SET_LIST"
|
||||
export const RESET_LIST = "objects/RESET_LIST"
|
||||
export const SET_FILTER = "objects/SET_FILTER"
|
||||
export const APPEND_LIST = "objects/APPEND_LIST"
|
||||
export const REMOVE = "objects/REMOVE"
|
||||
export const SET_SORT_BY = "objects/SET_SORT_BY"
|
||||
@@ -57,6 +58,13 @@ export const resetList = () => ({
|
||||
type: RESET_LIST,
|
||||
})
|
||||
|
||||
export const setFilter = filter => {
|
||||
return {
|
||||
type: SET_FILTER,
|
||||
filter
|
||||
}
|
||||
}
|
||||
|
||||
export const setListLoading = (listLoading) => ({
|
||||
type: SET_LIST_LOADING,
|
||||
listLoading,
|
||||
|
||||
@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
|
||||
export default (
|
||||
state = {
|
||||
list: [],
|
||||
filter: "",
|
||||
listLoading: false,
|
||||
sortBy: "",
|
||||
sortOrder: SORT_ORDER_ASC,
|
||||
@@ -53,6 +54,11 @@ export default (
|
||||
...state,
|
||||
list: []
|
||||
}
|
||||
case actionsObjects.SET_FILTER:
|
||||
return {
|
||||
...state,
|
||||
filter: action.filter
|
||||
}
|
||||
case actionsObjects.SET_LIST_LOADING:
|
||||
return {
|
||||
...state,
|
||||
|
||||
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
export const getCheckedList = state => state.objects.checkedList

export const getPrefixWritable = state => state.objects.prefixWritable

const objectsSelector = state => state.objects.list
const objectsFilterSelector = state => state.objects.filter

export const getFilteredObjects = createSelector(
  objectsSelector,
  objectsFilterSelector,
  (objects, filter) => objects.filter(
    object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
)
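`getFilteredObjects` keeps the objects whose names start with the search text, compared case-insensitively. The same rule, sketched in Go (illustrative only):

```go
// Sketch of the object-search filter above: case-insensitive prefix match on
// object names.
package main

import (
	"fmt"
	"strings"
)

func filterObjects(names []string, filter string) []string {
	f := strings.ToLower(filter)
	var out []string
	for _, n := range names {
		if strings.HasPrefix(strings.ToLower(n), f) {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	names := []string{"Report.pdf", "photos/cat.jpg", "readme.md"}
	fmt.Println(filterObjects(names, "re")) // [Report.pdf readme.md]
}
```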
@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
    // Overwrite the default styling from react-dropzone; otherwise it
    // won't handle child elements correctly.
    const style = {
      height: "100%",
      flex: "1",
      borderWidth: "0",
      borderStyle: "dashed",
      borderColor: "#fff"
@@ -21,7 +21,7 @@ import storage from 'local-storage-fallback'

class Web {
  constructor(endpoint) {
    const namespace = 'Web'
    const namespace = 'web'
    this.JSONrpc = new JSONrpc({
      endpoint,
      namespace
@@ -20,7 +20,8 @@
|
||||
@media(max-width: @screen-sm-max) {
|
||||
padding: 75px 0 80px;
|
||||
}
|
||||
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
min-height:100vh;
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
@@ -169,6 +169,24 @@ select.form-control {
|
||||
}
|
||||
}
|
||||
|
||||
.ig-search-dark {
|
||||
&:before {
|
||||
font-family: @font-family-icon;
|
||||
font-weight: 900;
|
||||
content: '\f002';
|
||||
font-size: 15px;
|
||||
position: absolute;
|
||||
left: 2px;
|
||||
top: 8px;
|
||||
color: rgba(0, 0, 0, 0.5);
|
||||
}
|
||||
|
||||
.ig-text {
|
||||
padding-left: 25px;
|
||||
.placeholder(rgba(0, 0, 0, 0.5))
|
||||
}
|
||||
}
|
||||
|
||||
.ig-search {
|
||||
&:before {
|
||||
font-family: @font-family-icon;
|
||||
@@ -270,4 +288,4 @@ select.form-control {
|
||||
.set-expire-decrease {
|
||||
bottom: -27px;
|
||||
.rotate(-180deg);
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -44,10 +44,21 @@ func releaseTag(version string) string {
		relPrefix = prefix
	}

	relSuffix := ""
	if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
		relSuffix = hotfix
	}

	relTag := strings.Replace(version, " ", "-", -1)
	relTag = strings.Replace(relTag, ":", "-", -1)
	relTag = strings.Replace(relTag, ",", "", -1)
	return relPrefix + "." + relTag
	relTag = relPrefix + "." + relTag

	if relSuffix != "" {
		relTag += "." + relSuffix
	}

	return relTag
}

// commitID returns the abbreviated commit-id hash of the last commit.

@@ -68,5 +79,12 @@ func commitID() string {
}

func main() {
	fmt.Println(genLDFlags(time.Now().UTC().Format(time.RFC3339)))
	var version string
	if len(os.Args) > 1 {
		version = os.Args[1]
	} else {
		version = time.Now().UTC().Format(time.RFC3339)
	}

	fmt.Println(genLDFlags(version))
}
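With this change, `releaseTag` appends the `MINIO_HOTFIX` suffix after the timestamp portion, and `main` accepts an explicit version argument instead of always using the current time. A condensed, standalone sketch of the resulting tag shape (not the buildscript itself):

```go
// Standalone sketch: with MINIO_RELEASE=RELEASE and MINIO_HOTFIX=hotfix, a
// version of "2020-11-25T22:36:25Z" becomes "RELEASE.2020-11-25T22-36-25Z.hotfix".
package main

import (
	"fmt"
	"strings"
)

func releaseTag(prefix, suffix, version string) string {
	relTag := strings.NewReplacer(" ", "-", ":", "-", ",", "").Replace(version)
	relTag = prefix + "." + relTag
	if suffix != "" {
		relTag += "." + suffix
	}
	return relTag
}

func main() {
	fmt.Println(releaseTag("RELEASE", "hotfix", "2020-11-25T22:36:25Z"))
	// RELEASE.2020-11-25T22-36-25Z.hotfix
}
```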
@@ -45,88 +45,64 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
|
||||
function start_minio_fs()
|
||||
{
|
||||
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
|
||||
minio_pid=$!
|
||||
sleep 10
|
||||
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function start_minio_erasure()
|
||||
{
|
||||
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
|
||||
minio_pid=$!
|
||||
sleep 15
|
||||
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function start_minio_erasure_sets()
|
||||
{
|
||||
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
|
||||
minio_pid=$!
|
||||
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
|
||||
"${MINIO[@]}" server > "$WORK_DIR/erasure-minio-sets.log" 2>&1 &
|
||||
sleep 15
|
||||
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function start_minio_zone_erasure_sets()
|
||||
function start_minio_pool_erasure_sets()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
|
||||
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
|
||||
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
|
||||
"${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
|
||||
"${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &
|
||||
|
||||
sleep 40
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
function start_minio_zone_erasure_sets_ipv6()
|
||||
function start_minio_pool_erasure_sets_ipv6()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
|
||||
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
|
||||
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
|
||||
"${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
|
||||
"${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
|
||||
|
||||
sleep 40
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
function start_minio_dist_erasure()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
|
||||
minio_pids[2]=$!
|
||||
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
|
||||
minio_pids[3]=$!
|
||||
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
|
||||
for i in $(seq 0 3); do
|
||||
"${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
|
||||
done
|
||||
|
||||
sleep 40
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
function run_test_fs()
|
||||
{
|
||||
minio_pid="$(start_minio_fs)"
|
||||
start_minio_fs
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
kill "$minio_pid"
|
||||
pkill minio
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
@@ -137,13 +113,14 @@ function run_test_fs()
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_erasure_sets() {
|
||||
minio_pid="$(start_minio_erasure_sets)"
|
||||
function run_test_erasure_sets()
|
||||
{
|
||||
start_minio_erasure_sets
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
kill "$minio_pid"
|
||||
pkill minio
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
@@ -154,55 +131,51 @@ function run_test_erasure_sets() {
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_zone_erasure_sets()
|
||||
function run_test_pool_erasure_sets()
|
||||
{
|
||||
minio_pids=( $(start_minio_zone_erasure_sets) )
|
||||
start_minio_pool_erasure_sets
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
kill "$pid"
|
||||
done
|
||||
pkill minio
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
for i in $(seq 0 1); do
|
||||
echo "server$i log:"
|
||||
cat "$WORK_DIR/zone-minio-900$i.log"
|
||||
cat "$WORK_DIR/pool-minio-900$i.log"
|
||||
done
|
||||
fi
|
||||
|
||||
for i in $(seq 0 1); do
|
||||
rm -f "$WORK_DIR/zone-minio-900$i.log"
|
||||
rm -f "$WORK_DIR/pool-minio-900$i.log"
|
||||
done
|
||||
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_zone_erasure_sets_ipv6()
|
||||
function run_test_pool_erasure_sets_ipv6()
|
||||
{
|
||||
minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
|
||||
start_minio_pool_erasure_sets_ipv6
|
||||
|
||||
export SERVER_ENDPOINT="[::1]:9000"
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
kill "$pid"
|
||||
done
|
||||
pkill minio
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
for i in $(seq 0 1); do
|
||||
echo "server$i log:"
|
||||
cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
|
||||
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
|
||||
done
|
||||
fi
|
||||
|
||||
for i in $(seq 0 1); do
|
||||
rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
|
||||
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
|
||||
done
|
||||
|
||||
return "$rv"
|
||||
@@ -210,12 +183,12 @@ function run_test_zone_erasure_sets_ipv6()
|
||||
|
||||
function run_test_erasure()
|
||||
{
|
||||
minio_pid="$(start_minio_erasure)"
|
||||
start_minio_erasure
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
kill "$minio_pid"
|
||||
pkill minio
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
@@ -228,14 +201,12 @@ function run_test_erasure()
|
||||
|
||||
function run_test_dist_erasure()
|
||||
{
|
||||
minio_pids=( $(start_minio_dist_erasure) )
|
||||
start_minio_dist_erasure
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
kill "$pid"
|
||||
done
|
||||
pkill minio
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
@@ -251,7 +222,7 @@ function run_test_dist_erasure()
|
||||
|
||||
rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"
|
||||
|
||||
return "$rv"
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function purge()
|
||||
@@ -324,14 +295,14 @@ function main()
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Eraure expanded setup"
|
||||
if ! run_test_zone_erasure_sets; then
|
||||
if ! run_test_pool_erasure_sets; then
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure expanded setup with ipv6"
|
||||
if ! run_test_zone_erasure_sets_ipv6; then
|
||||
if ! run_test_pool_erasure_sets_ipv6; then
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
|
||||
@@ -29,44 +29,52 @@ MINIO_CONFIG_DIR="$WORK_DIR/.minio"
|
||||
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
|
||||
|
||||
function start_minio_3_node() {
|
||||
declare -a minio_pids
|
||||
declare -a ARGS
|
||||
export MINIO_ACCESS_KEY=minio
|
||||
export MINIO_SECRET_KEY=minio123
|
||||
export MINIO_ERASURE_SET_DRIVE_COUNT=6
|
||||
|
||||
start_port=$(shuf -i 10000-65000 -n 1)
|
||||
args=""
|
||||
for i in $(seq 1 3); do
|
||||
ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
|
||||
args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
|
||||
done
|
||||
|
||||
"${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
disown "${minio_pids[0]}"
|
||||
"${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
|
||||
disown $!
|
||||
|
||||
"${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
disown "${minio_pids[1]}"
|
||||
"${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
|
||||
disown $!
|
||||
|
||||
"${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
|
||||
minio_pids[2]=$!
|
||||
disown "${minio_pids[2]}"
|
||||
"${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
|
||||
disown $!
|
||||
|
||||
sleep "$1"
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
if ! kill "$pid"; then
|
||||
for i in $(seq 1 3); do
|
||||
echo "server$i log:"
|
||||
cat "${WORK_DIR}/dist-minio-server$i.log"
|
||||
done
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$(pgrep -c minio)" -ne 3 ]; then
|
||||
for i in $(seq 1 3); do
|
||||
echo "server$i log:"
|
||||
cat "${WORK_DIR}/dist-minio-server$i.log"
|
||||
done
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
if ! pkill minio; then
|
||||
for i in $(seq 1 3); do
|
||||
echo "server$i log:"
|
||||
cat "${WORK_DIR}/dist-minio-server$i.log"
|
||||
done
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep 1;
|
||||
if pgrep minio; then
|
||||
# forcibly killing, to proceed further properly.
|
||||
kill -9 "$pid"
|
||||
sleep 1 # wait 1sec per pid
|
||||
done
|
||||
if ! pkill -9 minio; then
|
||||
echo "no minio process running anymore, proceed."
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
@@ -128,6 +127,12 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
defer logger.AuditLog(w, r, "SetBucketTarget", mustGetClaimsFromToken(r))
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
update := r.URL.Query().Get("update") == "true"
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
|
||||
@@ -135,10 +140,6 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
@@ -163,23 +164,23 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
host, port, err := net.SplitHostPort(target.Endpoint)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
sameTarget, _ := isLocalHost(host, port, globalMinioPort)
|
||||
|
||||
sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
|
||||
if sameTarget && bucket == target.TargetBucket {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
target.SourceBucket = bucket
|
||||
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
|
||||
if !update {
|
||||
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
|
||||
}
|
||||
if target.Arn == "" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target); err != nil {
|
||||
|
||||
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -187,7 +188,6 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
|
||||
}
|
||||
tgtBytes, err := json.Marshal(&targets)
|
||||
if err != nil {
|
||||
@@ -218,7 +218,10 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
arnType := vars["type"]
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
|
||||
if objectAPI == nil {
|
||||
@@ -226,6 +229,11 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
|
||||
return
|
||||
}
|
||||
if bucket != "" {
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
@@ -250,16 +258,16 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
|
||||
bucket := vars["bucket"]
|
||||
arn := vars["arn"]
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
|
||||
@@ -42,7 +42,7 @@ import (
|
||||
|
||||
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return auth.Credentials{}, nil
|
||||
@@ -130,8 +130,8 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -158,6 +158,17 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
|
||||
}
|
||||
|
||||
// Apply dynamic values.
|
||||
if err := applyDynamicConfig(GlobalContext, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
globalNotificationSys.SignalService(serviceReloadDynamic)
|
||||
|
||||
// If all values were dynamic, tell the client.
|
||||
if dynamic {
|
||||
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
|
||||
}
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
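The SetConfigKV change above applies dynamic settings in place and signals the result to the client through the madmin.ConfigAppliedHeader response header. A minimal, self-contained sketch of the underlying idea, not the MinIO code itself; the subsystem names in the map are placeholders only:

package main

import "fmt"

// Hypothetical split between dynamic and static config keys: dynamic keys can be
// applied without a restart, anything else still needs one.
var dynamicSubSys = map[string]bool{
	"api":     true,
	"heal":    true,
	"scanner": true,
}

// allDynamic reports whether every changed key belongs to a dynamic subsystem.
func allDynamic(changed []string) bool {
	for _, k := range changed {
		if !dynamicSubSys[k] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(allDynamic([]string{"api", "heal"})) // true: apply in place, set the "applied" header
	fmt.Println(allDynamic([]string{"region"}))      // false: client should prompt for a restart
}

A client can then read the header on the response to decide whether a restart prompt is needed.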
@@ -266,7 +277,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -378,7 +389,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
|
||||
}
|
||||
|
||||
cfg := newServerConfig()
|
||||
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -35,8 +35,8 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
|
||||
var adminAPIErr APIErrorCode
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return nil, cred
|
||||
}
|
||||
@@ -129,13 +129,40 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
vars := mux.Vars(r)
|
||||
name := vars["accessKey"]
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
name := vars["accessKey"]
|
||||
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
|
||||
if s3Err != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
accessKey := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
accessKey = cred.ParentUser
|
||||
}
|
||||
|
||||
implicitPerm := name == accessKey
|
||||
if !implicitPerm {
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: accessKey,
|
||||
Action: iampolicy.GetUserAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", accessKey, claims),
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
userInfo, err := globalIAMSys.GetUserInfo(name)
|
||||
if err != nil {
|
||||
@@ -304,7 +331,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
|
||||
accessKey := vars["accessKey"]
|
||||
status := vars["status"]
|
||||
|
||||
// Custom IAM policies not allowed for admin user.
|
||||
// This API is not allowed to lookup accessKey user status
|
||||
if accessKey == globalActiveCred.AccessKey {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
@@ -330,20 +357,47 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
accessKey := vars["accessKey"]
|
||||
|
||||
// Custom IAM policies not allowed for admin user.
|
||||
if accessKey == globalActiveCred.AccessKey {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
|
||||
if s3Err != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if cred.IsTemp() || cred.IsServiceAccount() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccountNotEligible), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Not allowed to add a user with same access key as root credential
|
||||
if owner && accessKey == cred.AccessKey {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
implicitPerm := accessKey == cred.AccessKey
|
||||
if !implicitPerm {
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.CreateUserAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
@@ -386,8 +440,8 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -398,6 +452,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
// Disallow creating service accounts by root user.
|
||||
if owner {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
@@ -411,12 +471,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
// Disallow creating service accounts by root user.
|
||||
if owner {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
parentUser := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
parentUser = cred.ParentUser
|
||||
@@ -465,8 +519,8 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
|
||||
defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -520,8 +574,8 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
|
||||
defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -572,15 +626,15 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
// AccountUsageInfoHandler returns usage
|
||||
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AccountUsageInfo")
|
||||
// AccountInfoHandler returns usage
|
||||
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AccountInfo")
|
||||
|
||||
defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
|
||||
defer logger.AuditLog(w, r, "AccountInfo", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -645,8 +699,16 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
|
||||
accountName = cred.ParentUser
|
||||
}
|
||||
|
||||
acctInfo := madmin.AccountUsageInfo{
|
||||
policies, err := globalIAMSys.PolicyDBGet(accountName, false)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
acctInfo := madmin.AccountInfo{
|
||||
AccountName: accountName,
|
||||
Policy: globalIAMSys.GetCombinedPolicy(policies...),
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
@@ -722,7 +784,10 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(policy)
|
||||
if err = json.NewEncoder(w).Encode(policy); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
|
||||
@@ -35,14 +35,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/notify"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/cmd/logger/message/log"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/bandwidth"
|
||||
"github.com/minio/minio/pkg/handlers"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
@@ -54,16 +53,13 @@ const (
|
||||
maxEConfigJSONSize = 262272
|
||||
)
|
||||
|
||||
// Type-safe query params.
|
||||
type mgmtQueryKey string
|
||||
|
||||
// Only valid query params for mgmt admin APIs.
|
||||
const (
|
||||
mgmtBucket mgmtQueryKey = "bucket"
|
||||
mgmtPrefix = "prefix"
|
||||
mgmtClientToken = "clientToken"
|
||||
mgmtForceStart = "forceStart"
|
||||
mgmtForceStop = "forceStop"
|
||||
mgmtBucket = "bucket"
|
||||
mgmtPrefix = "prefix"
|
||||
mgmtClientToken = "clientToken"
|
||||
mgmtForceStart = "forceStart"
|
||||
mgmtForceStop = "forceStop"
|
||||
)
|
||||
|
||||
func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (us madmin.ServerUpdateStatus, err error) {
|
||||
@@ -90,8 +86,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
||||
}
|
||||
|
||||
if globalInplaceUpdateDisabled {
|
||||
// if MINIO_UPDATE=off - inplace update is disabled, mostly
|
||||
// in containers.
|
||||
// if MINIO_UPDATE=off - inplace update is disabled, mostly in containers.
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -132,7 +127,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
||||
return
|
||||
}
|
||||
|
||||
if lrTime.Sub(crTime) < 0 {
|
||||
if lrTime.Sub(crTime) <= 0 {
|
||||
updateStatus := madmin.ServerUpdateStatus{
|
||||
CurrentVersion: Version,
|
||||
UpdatedVersion: Version,
|
||||
@@ -210,9 +205,10 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
var objectAPI ObjectLayer
|
||||
if serviceSig == serviceRestart {
|
||||
switch serviceSig {
|
||||
case serviceRestart:
|
||||
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
|
||||
} else {
|
||||
case serviceStop:
|
||||
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
|
||||
}
|
||||
if objectAPI == nil {
|
||||
@@ -298,6 +294,20 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
|
||||
// ignores any errors here.
|
||||
storageInfo, _ := objectAPI.StorageInfo(ctx, false)
|
||||
|
||||
// Collect any disk healing.
|
||||
healing, _ := getAggregatedBackgroundHealState(ctx)
|
||||
healDisks := make(map[string]struct{}, len(healing.HealDisks))
|
||||
for _, disk := range healing.HealDisks {
|
||||
healDisks[disk] = struct{}{}
|
||||
}
|
||||
|
||||
// find all disks which belong to each respective endpoints
|
||||
for i, disk := range storageInfo.Disks {
|
||||
if _, ok := healDisks[disk.Endpoint]; ok {
|
||||
storageInfo.Disks[i].Healing = true
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal API response
|
||||
jsonBytes, err := json.Marshal(storageInfo)
|
||||
if err != nil {
|
||||
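The StorageInfo hunk above cross-references the aggregated background-heal state to flag disks that are currently healing. A small stand-alone sketch of that set-membership pass, with a simplified Disk type standing in for the real madmin type:

package main

import "fmt"

// Disk is a trimmed-down stand-in for the storage-info disk entry.
type Disk struct {
	Endpoint string
	Healing  bool
}

// markHealing collects the endpoints reported as healing into a set, then flags
// the matching entries in the storage listing.
func markHealing(disks []Disk, healingEndpoints []string) {
	healSet := make(map[string]struct{}, len(healingEndpoints))
	for _, ep := range healingEndpoints {
		healSet[ep] = struct{}{}
	}
	for i := range disks {
		if _, ok := healSet[disks[i].Endpoint]; ok {
			disks[i].Healing = true
		}
	}
}

func main() {
	disks := []Disk{{Endpoint: "http://node1/data1"}, {Endpoint: "http://node2/data1"}}
	markHealing(disks, []string{"http://node2/data1"})
	fmt.Printf("%+v\n", disks)
}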
@@ -345,7 +355,9 @@ func lriToLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEn
|
||||
Resource: resource,
|
||||
ServerList: []string{server},
|
||||
Source: l.Source,
|
||||
Owner: l.Owner,
|
||||
ID: l.UID,
|
||||
Quorum: l.Quorum,
|
||||
}
|
||||
if l.Writer {
|
||||
entry.Type = "WRITE"
|
||||
@@ -355,39 +367,40 @@ func lriToLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEn
|
||||
return entry
|
||||
}
|
||||
|
||||
func topLockEntries(peerLocks []*PeerLocks, count int) madmin.LockEntries {
|
||||
func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {
|
||||
entryMap := make(map[string]*madmin.LockEntry)
|
||||
for _, peerLock := range peerLocks {
|
||||
if peerLock == nil {
|
||||
continue
|
||||
}
|
||||
for _, locks := range peerLock.Locks {
|
||||
for k, v := range locks {
|
||||
for _, lockReqInfo := range v {
|
||||
if val, ok := entryMap[lockReqInfo.UID]; ok {
|
||||
val.ServerList = append(val.ServerList, peerLock.Addr)
|
||||
} else {
|
||||
entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
|
||||
}
|
||||
for k, v := range peerLock.Locks {
|
||||
for _, lockReqInfo := range v {
|
||||
if val, ok := entryMap[lockReqInfo.UID]; ok {
|
||||
val.ServerList = append(val.ServerList, peerLock.Addr)
|
||||
} else {
|
||||
entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
var lockEntries = make(madmin.LockEntries, 0, len(entryMap))
|
||||
var lockEntries madmin.LockEntries
|
||||
for _, v := range entryMap {
|
||||
lockEntries = append(lockEntries, *v)
|
||||
if stale {
|
||||
lockEntries = append(lockEntries, *v)
|
||||
continue
|
||||
}
|
||||
if len(v.ServerList) >= v.Quorum {
|
||||
lockEntries = append(lockEntries, *v)
|
||||
}
|
||||
}
|
||||
sort.Sort(lockEntries)
|
||||
if len(lockEntries) > count {
|
||||
lockEntries = lockEntries[:count]
|
||||
}
|
||||
return lockEntries
|
||||
}
|
||||
|
||||
// PeerLocks holds server information result of one node
|
||||
type PeerLocks struct {
|
||||
Addr string
|
||||
Locks GetLocksResp
|
||||
Locks map[string][]lockRequesterInfo
|
||||
}
|
||||
|
||||
// TopLocksHandler Get list of locks in use
|
||||
@@ -410,23 +423,17 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
}
|
||||
stale := r.URL.Query().Get("stale") == "true" // list also stale locks
|
||||
|
||||
peerLocks := globalNotificationSys.GetLocks(ctx)
|
||||
// Once we have received all the locks currently used from peers
|
||||
// add the local peer locks list as well.
|
||||
var getRespLocks GetLocksResp
|
||||
for _, llocker := range globalLockServers {
|
||||
getRespLocks = append(getRespLocks, llocker.DupLockMap())
|
||||
peerLocks := globalNotificationSys.GetLocks(ctx, r)
|
||||
|
||||
topLocks := topLockEntries(peerLocks, stale)
|
||||
|
||||
// Marshal API response upto requested count.
|
||||
if len(topLocks) > count && count > 0 {
|
||||
topLocks = topLocks[:count]
|
||||
}
|
||||
|
||||
peerLocks = append(peerLocks, &PeerLocks{
|
||||
Addr: getHostName(r),
|
||||
Locks: getRespLocks,
|
||||
})
|
||||
|
||||
topLocks := topLockEntries(peerLocks, count)
|
||||
|
||||
// Marshal API response
|
||||
jsonBytes, err := json.Marshal(topLocks)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
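The two lock-listing hunks above change topLockEntries to take a stale flag and to report an entry only when enough servers still hold it, with truncation to the requested count moved into the handler. A self-contained sketch of that filtering rule, using a simplified LockEntry in place of the madmin type:

package main

import (
	"fmt"
	"sort"
)

// LockEntry is a placeholder for the admin API lock entry.
type LockEntry struct {
	Timestamp  int64
	Resource   string
	ServerList []string
	Quorum     int
}

// filterLocks keeps an entry only if enough servers still hold it, unless stale
// entries were explicitly requested, then sorts and truncates to count.
func filterLocks(entries []LockEntry, stale bool, count int) []LockEntry {
	var out []LockEntry
	for _, e := range entries {
		if stale || len(e.ServerList) >= e.Quorum {
			out = append(out, e)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Timestamp < out[j].Timestamp })
	if count > 0 && len(out) > count {
		out = out[:count]
	}
	return out
}

func main() {
	entries := []LockEntry{
		{Timestamp: 10, Resource: "bucket/obj1", ServerList: []string{"s1"}, Quorum: 2},      // below quorum
		{Timestamp: 5, Resource: "bucket/obj2", ServerList: []string{"s1", "s2"}, Quorum: 2}, // healthy
	}
	fmt.Println(filterLocks(entries, false, 10)) // only obj2
	fmt.Println(filterLocks(entries, true, 10))  // both
}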
@@ -572,8 +579,8 @@ type healInitParams struct {
|
||||
|
||||
// extractHealInitParams - Validates params for heal init API.
|
||||
func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
|
||||
hip.bucket = vars[string(mgmtBucket)]
|
||||
hip.objPrefix = vars[string(mgmtPrefix)]
|
||||
hip.bucket = vars[mgmtBucket]
|
||||
hip.objPrefix = vars[mgmtPrefix]
|
||||
|
||||
if hip.bucket == "" {
|
||||
if hip.objPrefix != "" {
|
||||
@@ -592,13 +599,13 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
|
||||
return
|
||||
}
|
||||
|
||||
if len(qParms[string(mgmtClientToken)]) > 0 {
|
||||
hip.clientToken = qParms[string(mgmtClientToken)][0]
|
||||
if len(qParms[mgmtClientToken]) > 0 {
|
||||
hip.clientToken = qParms[mgmtClientToken][0]
|
||||
}
|
||||
if _, ok := qParms[string(mgmtForceStart)]; ok {
|
||||
if _, ok := qParms[mgmtForceStart]; ok {
|
||||
hip.forceStart = true
|
||||
}
|
||||
if _, ok := qParms[string(mgmtForceStop)]; ok {
|
||||
if _, ok := qParms[mgmtForceStop]; ok {
|
||||
hip.forceStop = true
|
||||
}
|
||||
|
||||
@@ -787,7 +794,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
|
||||
case hip.clientToken == "":
|
||||
nh := newHealSequence(GlobalContext, hip.bucket, hip.objPrefix, handlers.GetSourceIP(r), hip.hs, hip.forceStart)
|
||||
go func() {
|
||||
respBytes, apiErr, errMsg := globalAllHealState.LaunchNewHealSequence(nh)
|
||||
respBytes, apiErr, errMsg := globalAllHealState.LaunchNewHealSequence(nh, objectAPI)
|
||||
hr := healResp{respBytes, apiErr, errMsg}
|
||||
respCh <- hr
|
||||
}()
|
||||
@@ -886,7 +893,7 @@ func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Reques
|
||||
var cred auth.Credentials
|
||||
var adminAPIErr APIErrorCode
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return nil, cred
|
||||
@@ -999,6 +1006,15 @@ func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Handle browser requests separately filter them and return.
|
||||
if HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+"/upload") {
|
||||
if errOnly {
|
||||
return trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
trace := trcAll || !HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
|
||||
if errOnly {
|
||||
return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
|
||||
@@ -1028,7 +1044,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Use buffered channel to take care of burst sends or slow w.Write()
|
||||
traceCh := make(chan interface{}, 4000)
|
||||
|
||||
peers := newPeerRestClients(globalEndpoints)
|
||||
peers, _ := newPeerRestClients(globalEndpoints)
|
||||
|
||||
globalHTTPTrace.Subscribe(traceCh, ctx.Done(), func(entry interface{}) bool {
|
||||
return mustTrace(entry, trcAll, trcErr)
|
||||
@@ -1096,7 +1112,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
logCh := make(chan interface{}, 4000)
|
||||
|
||||
peers := newPeerRestClients(globalEndpoints)
|
||||
peers, _ := newPeerRestClients(globalEndpoints)
|
||||
|
||||
globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
|
||||
|
||||
@@ -1228,26 +1244,26 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
|
||||
writeSuccessResponseJSON(w, resp)
|
||||
}
|
||||
|
||||
// OBDInfoHandler - GET /minio/admin/v3/obdinfo
|
||||
// HealthInfoHandler - GET /minio/admin/v3/healthinfo
|
||||
// ----------
|
||||
// Get server on-board diagnostics
|
||||
func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "OBDInfo")
|
||||
// Get server health info
|
||||
func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "HealthInfo")
|
||||
|
||||
defer logger.AuditLog(w, r, "OBDInfo", mustGetClaimsFromToken(r))
|
||||
defer logger.AuditLog(w, r, "HealthInfo", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.OBDInfoAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
obdInfo := madmin.OBDInfo{}
|
||||
obdInfoCh := make(chan madmin.OBDInfo)
|
||||
query := r.URL.Query()
|
||||
healthInfo := madmin.HealthInfo{}
|
||||
healthInfoCh := make(chan madmin.HealthInfo)
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
partialWrite := func(oinfo madmin.OBDInfo) {
|
||||
obdInfoCh <- oinfo
|
||||
partialWrite := func(oinfo madmin.HealthInfo) {
|
||||
healthInfoCh <- oinfo
|
||||
}
|
||||
|
||||
setCommonHeaders(w)
|
||||
@@ -1260,8 +1276,8 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
|
||||
errorResponse := getAPIErrorResponse(ctx, toAdminAPIErr(ctx, err), r.URL.String(),
|
||||
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
obdInfo.Error = string(encodedErrorResponse)
|
||||
logger.LogIf(ctx, enc.Encode(obdInfo))
|
||||
healthInfo.Error = string(encodedErrorResponse)
|
||||
logger.LogIf(ctx, enc.Encode(healthInfo))
|
||||
}
|
||||
|
||||
deadline := 3600 * time.Second
|
||||
@@ -1277,108 +1293,109 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
|
||||
deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
|
||||
defer cancel()
|
||||
|
||||
nsLock := objectAPI.NewNSLock(ctx, minioMetaBucket, "obd-in-progress")
|
||||
if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
|
||||
nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
|
||||
if err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
|
||||
errResp(err)
|
||||
return
|
||||
}
|
||||
defer nsLock.Unlock()
|
||||
|
||||
go func() {
|
||||
defer close(obdInfoCh)
|
||||
defer close(healthInfoCh)
|
||||
|
||||
if cpu, ok := vars["syscpu"]; ok && cpu == "true" {
|
||||
cpuInfo := getLocalCPUOBDInfo(deadlinedCtx, r)
|
||||
if cpu := query.Get("syscpu"); cpu == "true" {
|
||||
cpuInfo := getLocalCPUInfo(deadlinedCtx, r)
|
||||
|
||||
obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
|
||||
obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, globalNotificationSys.CPUOBDInfo(deadlinedCtx)...)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, cpuInfo)
|
||||
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, globalNotificationSys.CPUInfo(deadlinedCtx)...)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if diskHw, ok := vars["sysdiskhw"]; ok && diskHw == "true" {
|
||||
diskHwInfo := getLocalDiskHwOBD(deadlinedCtx, r)
|
||||
if diskHw := query.Get("sysdiskhw"); diskHw == "true" {
|
||||
diskHwInfo := getLocalDiskHwInfo(deadlinedCtx, r)
|
||||
|
||||
obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
|
||||
obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, globalNotificationSys.DiskHwOBDInfo(deadlinedCtx)...)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Sys.DiskHwInfo = append(healthInfo.Sys.DiskHwInfo, diskHwInfo)
|
||||
healthInfo.Sys.DiskHwInfo = append(healthInfo.Sys.DiskHwInfo, globalNotificationSys.DiskHwInfo(deadlinedCtx)...)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if osInfo, ok := vars["sysosinfo"]; ok && osInfo == "true" {
|
||||
osInfo := getLocalOsInfoOBD(deadlinedCtx, r)
|
||||
if osInfo := query.Get("sysosinfo"); osInfo == "true" {
|
||||
osInfo := getLocalOsInfo(deadlinedCtx, r)
|
||||
|
||||
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
|
||||
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, globalNotificationSys.OsOBDInfo(deadlinedCtx)...)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Sys.OsInfo = append(healthInfo.Sys.OsInfo, osInfo)
|
||||
healthInfo.Sys.OsInfo = append(healthInfo.Sys.OsInfo, globalNotificationSys.OsInfo(deadlinedCtx)...)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if mem, ok := vars["sysmem"]; ok && mem == "true" {
|
||||
memInfo := getLocalMemOBD(deadlinedCtx, r)
|
||||
if mem := query.Get("sysmem"); mem == "true" {
|
||||
memInfo := getLocalMemInfo(deadlinedCtx, r)
|
||||
|
||||
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
|
||||
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, globalNotificationSys.MemOBDInfo(deadlinedCtx)...)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, memInfo)
|
||||
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, globalNotificationSys.MemInfo(deadlinedCtx)...)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if proc, ok := vars["sysprocess"]; ok && proc == "true" {
|
||||
procInfo := getLocalProcOBD(deadlinedCtx, r)
|
||||
if proc := query.Get("sysprocess"); proc == "true" {
|
||||
procInfo := getLocalProcInfo(deadlinedCtx, r)
|
||||
|
||||
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
|
||||
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, globalNotificationSys.ProcOBDInfo(deadlinedCtx)...)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, procInfo)
|
||||
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, globalNotificationSys.ProcInfo(deadlinedCtx)...)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if config, ok := vars["minioconfig"]; ok && config == "true" {
|
||||
if config := query.Get("minioconfig"); config == "true" {
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
logger.LogIf(ctx, err)
|
||||
obdInfo.Minio.Config = cfg
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Minio.Config = cfg
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if drive, ok := vars["perfdrive"]; ok && drive == "true" {
|
||||
// Get drive obd details from local server's drive(s)
|
||||
driveOBDSerial := getLocalDrivesOBD(deadlinedCtx, false, globalEndpoints, r)
|
||||
driveOBDParallel := getLocalDrivesOBD(deadlinedCtx, true, globalEndpoints, r)
|
||||
if drive := query.Get("perfdrive"); drive == "true" {
|
||||
// Get drive perf details from local server's drive(s)
|
||||
drivePerfSerial := getLocalDrives(deadlinedCtx, false, globalEndpoints, r)
|
||||
drivePerfParallel := getLocalDrives(deadlinedCtx, true, globalEndpoints, r)
|
||||
|
||||
errStr := ""
|
||||
if driveOBDSerial.Error != "" {
|
||||
errStr = "serial: " + driveOBDSerial.Error
|
||||
if drivePerfSerial.Error != "" {
|
||||
errStr = "serial: " + drivePerfSerial.Error
|
||||
}
|
||||
if driveOBDParallel.Error != "" {
|
||||
errStr = errStr + " parallel: " + driveOBDParallel.Error
|
||||
if drivePerfParallel.Error != "" {
|
||||
errStr = errStr + " parallel: " + drivePerfParallel.Error
|
||||
}
|
||||
|
||||
driveOBD := madmin.ServerDrivesOBDInfo{
|
||||
Addr: driveOBDSerial.Addr,
|
||||
Serial: driveOBDSerial.Serial,
|
||||
Parallel: driveOBDParallel.Parallel,
|
||||
driveInfo := madmin.ServerDrivesInfo{
|
||||
Addr: drivePerfSerial.Addr,
|
||||
Serial: drivePerfSerial.Serial,
|
||||
Parallel: drivePerfParallel.Parallel,
|
||||
Error: errStr,
|
||||
}
|
||||
obdInfo.Perf.DriveInfo = append(obdInfo.Perf.DriveInfo, driveOBD)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Perf.DriveInfo = append(healthInfo.Perf.DriveInfo, driveInfo)
|
||||
partialWrite(healthInfo)
|
||||
|
||||
// Notify all other MinIO peers to report drive obd numbers
|
||||
driveOBDs := globalNotificationSys.DriveOBDInfoChan(deadlinedCtx)
|
||||
for obd := range driveOBDs {
|
||||
obdInfo.Perf.DriveInfo = append(obdInfo.Perf.DriveInfo, obd)
|
||||
partialWrite(obdInfo)
|
||||
// Notify all other MinIO peers to report drive perf numbers
|
||||
driveInfos := globalNotificationSys.DrivePerfInfoChan(deadlinedCtx)
|
||||
for obd := range driveInfos {
|
||||
healthInfo.Perf.DriveInfo = append(healthInfo.Perf.DriveInfo, obd)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
partialWrite(obdInfo)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure {
|
||||
obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
|
||||
partialWrite(obdInfo)
|
||||
if net := query.Get("perfnet"); net == "true" && globalIsDistErasure {
|
||||
healthInfo.Perf.Net = append(healthInfo.Perf.Net, globalNotificationSys.NetInfo(deadlinedCtx))
|
||||
partialWrite(healthInfo)
|
||||
|
||||
netOBDs := globalNotificationSys.DispatchNetOBDChan(deadlinedCtx)
|
||||
for obd := range netOBDs {
|
||||
obdInfo.Perf.Net = append(obdInfo.Perf.Net, obd)
|
||||
partialWrite(obdInfo)
|
||||
netInfos := globalNotificationSys.DispatchNetPerfChan(deadlinedCtx)
|
||||
for netInfo := range netInfos {
|
||||
healthInfo.Perf.Net = append(healthInfo.Perf.Net, netInfo)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
partialWrite(obdInfo)
|
||||
partialWrite(healthInfo)
|
||||
|
||||
obdInfo.Perf.NetParallel = globalNotificationSys.NetOBDParallelInfo(deadlinedCtx)
|
||||
partialWrite(obdInfo)
|
||||
healthInfo.Perf.NetParallel = globalNotificationSys.NetPerfParallelInfo(deadlinedCtx)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
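The health-info hunk above stops reading the opt-in flags from mux route variables and reads them from the URL query string instead. A minimal net/http sketch of that pattern; the handler body and port are illustrative only:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// wantsSection reports whether an opt-in flag such as "syscpu" was requested.
func wantsSection(r *http.Request, name string) bool {
	return r.URL.Query().Get(name) == "true"
}

func handler(w http.ResponseWriter, r *http.Request) {
	if wantsSection(r, "syscpu") {
		fmt.Fprintln(w, "collecting CPU info...")
	}
	if wantsSection(r, "sysmem") {
		fmt.Fprintln(w, "collecting memory info...")
	}
}

func main() {
	http.HandleFunc("/minio/admin/v3/healthinfo", handler)
	// e.g. curl "http://localhost:8080/minio/admin/v3/healthinfo?syscpu=true&sysmem=true"
	log.Fatal(http.ListenAndServe(":8080", nil))
}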
@@ -1386,7 +1403,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
for {
|
||||
select {
|
||||
case oinfo, ok := <-obdInfoCh:
|
||||
case oinfo, ok := <-healthInfoCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
@@ -1405,6 +1422,59 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
}
|
||||
|
||||
// BandwidthMonitorHandler - GET /minio/admin/v3/bandwidth
|
||||
// ----------
|
||||
// Get bandwidth consumption information
|
||||
func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "BandwidthMonitor")
|
||||
|
||||
defer logger.AuditLog(w, r, "BandwidthMonitor", mustGetClaimsFromToken(r))
|
||||
|
||||
// Validate request signature.
|
||||
_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.BandwidthMonitorAction, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
setEventStreamHeaders(w)
|
||||
reportCh := make(chan bandwidth.Report, 1)
|
||||
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
||||
defer keepAliveTicker.Stop()
|
||||
bucketsRequestedString := r.URL.Query().Get("buckets")
|
||||
bucketsRequested := strings.Split(bucketsRequestedString, ",")
|
||||
go func() {
|
||||
for {
|
||||
reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case report := <-reportCh:
|
||||
enc := json.NewEncoder(w)
|
||||
err := enc.Encode(report)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
case <-keepAliveTicker.C:
|
||||
if _, err := w.Write([]byte(" ")); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ServerInfoHandler - GET /minio/admin/v3/info
|
||||
// ----------
|
||||
// Get server information
|
||||
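The new BandwidthMonitorHandler streams reports over a long-lived response, interleaving whitespace keep-alives so idle connections are not dropped by intermediaries. A trimmed, stand-alone sketch of that streaming loop, with a placeholder Report type rather than the real bandwidth.Report:

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"
)

// Report is a stand-in for the bandwidth report payload.
type Report struct {
	Timestamp time.Time `json:"timestamp"`
	Bytes     int64     `json:"bytes"`
}

func stream(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	enc := json.NewEncoder(w)
	reportTicker := time.NewTicker(2 * time.Second)        // periodic report
	keepAlive := time.NewTicker(500 * time.Millisecond)    // whitespace keep-alive
	defer reportTicker.Stop()
	defer keepAlive.Stop()
	for {
		select {
		case t := <-reportTicker.C:
			if err := enc.Encode(Report{Timestamp: t}); err != nil {
				return
			}
			flusher.Flush()
		case <-keepAlive.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			flusher.Flush()
		case <-r.Context().Done():
			return
		}
	}
}

func main() {
	http.HandleFunc("/bandwidth", stream)
	log.Fatal(http.ListenAndServe(":8080", nil))
}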
@@ -1418,12 +1488,6 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
buckets := madmin.Buckets{}
|
||||
objects := madmin.Objects{}
|
||||
usage := madmin.Usage{}
|
||||
@@ -1435,7 +1499,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
|
||||
}
|
||||
|
||||
vault := fetchVaultStatus(cfg)
|
||||
vault := fetchVaultStatus()
|
||||
|
||||
ldap := madmin.LDAP{}
|
||||
if globalLDAPConfig.Enabled {
|
||||
@@ -1451,10 +1515,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
log, audit := fetchLoggerInfo(cfg)
|
||||
log, audit := fetchLoggerInfo()
|
||||
|
||||
// Get the notification target info
|
||||
notifyTarget := fetchLambdaInfo(cfg)
|
||||
notifyTarget := fetchLambdaInfo()
|
||||
|
||||
// Fetching the Storage information, ignore any errors.
|
||||
storageInfo, _ := objectAPI.StorageInfo(ctx, false)
|
||||
@@ -1476,11 +1540,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
mode := "safemode"
|
||||
if newObjectLayerFn() != nil {
|
||||
mode = "online"
|
||||
}
|
||||
|
||||
mode := "online"
|
||||
server := getLocalServerProperty(globalEndpoints, r)
|
||||
servers := globalNotificationSys.ServerInfo()
|
||||
servers = append(servers, server)
|
||||
@@ -1494,20 +1554,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
Notifications: notifyTarget,
|
||||
}
|
||||
|
||||
// Collect any disk healing.
|
||||
healing, _ := getAggregatedBackgroundHealState(ctx)
|
||||
healDisks := make(map[string]struct{}, len(healing.HealDisks))
|
||||
for _, disk := range healing.HealDisks {
|
||||
healDisks[disk] = struct{}{}
|
||||
}
|
||||
|
||||
// find all disks which belong to each respective endpoints
|
||||
for i := range servers {
|
||||
for _, disk := range storageInfo.Disks {
|
||||
if strings.Contains(disk.Endpoint, servers[i].Endpoint) {
|
||||
if _, ok := healDisks[disk.Endpoint]; ok {
|
||||
disk.Healing = true
|
||||
}
|
||||
servers[i].Disks = append(servers[i].Disks, disk)
|
||||
}
|
||||
}
|
||||
@@ -1519,9 +1569,6 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
continue
|
||||
}
|
||||
if disk.Endpoint == disk.DrivePath {
|
||||
if _, ok := healDisks[disk.Endpoint]; ok {
|
||||
disk.Healing = true
|
||||
}
|
||||
servers[len(servers)-1].Disks = append(servers[len(servers)-1].Disks, disk)
|
||||
}
|
||||
}
|
||||
@@ -1547,27 +1594,33 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
//Reply with storage information (across nodes in a
|
||||
// Reply with storage information (across nodes in a
|
||||
// distributed setup) as json.
|
||||
writeSuccessResponseJSON(w, jsonBytes)
|
||||
}
|
||||
|
||||
func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
|
||||
|
||||
// Fetch the configured targets
|
||||
tr := NewGatewayHTTPTransport()
|
||||
defer tr.CloseIdleConnections()
|
||||
targetList, err := notify.FetchRegisteredTargets(GlobalContext, cfg, tr, true, false)
|
||||
if err != nil && err != notify.ErrTargetsOffline {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
return nil
|
||||
}
|
||||
func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
|
||||
|
||||
lambdaMap := make(map[string][]madmin.TargetIDStatus)
|
||||
|
||||
for targetID, target := range targetList.TargetMap() {
|
||||
for _, tgt := range globalConfigTargetList.Targets() {
|
||||
targetIDStatus := make(map[string]madmin.Status)
|
||||
active, _ := target.IsActive()
|
||||
active, _ := tgt.IsActive()
|
||||
targetID := tgt.ID()
|
||||
if active {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
|
||||
} else {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
|
||||
}
|
||||
list := lambdaMap[targetID.Name]
|
||||
list = append(list, targetIDStatus)
|
||||
lambdaMap[targetID.Name] = list
|
||||
}
|
||||
|
||||
for _, tgt := range globalEnvTargetList.Targets() {
|
||||
targetIDStatus := make(map[string]madmin.Status)
|
||||
active, _ := tgt.IsActive()
|
||||
targetID := tgt.ID()
|
||||
if active {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
|
||||
} else {
|
||||
@@ -1576,8 +1629,6 @@ func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
|
||||
list := lambdaMap[targetID.Name]
|
||||
list = append(list, targetIDStatus)
|
||||
lambdaMap[targetID.Name] = list
|
||||
// Close any leaking connections
|
||||
_ = target.Close()
|
||||
}
|
||||
|
||||
notify := make([]map[string][]madmin.TargetIDStatus, len(lambdaMap))
|
||||
@@ -1592,7 +1643,7 @@ func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
|
||||
}
|
||||
|
||||
// fetchVaultStatus fetches Vault Info
|
||||
func fetchVaultStatus(cfg config.Config) madmin.Vault {
|
||||
func fetchVaultStatus() madmin.Vault {
|
||||
vault := madmin.Vault{}
|
||||
if GlobalKMS == nil {
|
||||
vault.Status = "disabled"
|
||||
@@ -1635,41 +1686,42 @@ func fetchVaultStatus(cfg config.Config) madmin.Vault {
|
||||
}
|
||||
|
||||
// fetchLoggerDetails return log info
|
||||
func fetchLoggerInfo(cfg config.Config) ([]madmin.Logger, []madmin.Audit) {
|
||||
loggerCfg, _ := logger.LookupConfig(cfg)
|
||||
|
||||
var logger []madmin.Logger
|
||||
var auditlogger []madmin.Audit
|
||||
for log, l := range loggerCfg.HTTP {
|
||||
if l.Enabled {
|
||||
err := checkConnection(l.Endpoint, 15*time.Second)
|
||||
func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
|
||||
var loggerInfo []madmin.Logger
|
||||
var auditloggerInfo []madmin.Audit
|
||||
for _, target := range logger.Targets {
|
||||
if target.Endpoint() != "" {
|
||||
tgt := target.String()
|
||||
err := checkConnection(target.Endpoint(), 15*time.Second)
|
||||
if err == nil {
|
||||
mapLog := make(map[string]madmin.Status)
|
||||
mapLog[log] = madmin.Status{Status: "Online"}
|
||||
logger = append(logger, mapLog)
|
||||
mapLog[tgt] = madmin.Status{Status: "Online"}
|
||||
loggerInfo = append(loggerInfo, mapLog)
|
||||
} else {
|
||||
mapLog := make(map[string]madmin.Status)
|
||||
mapLog[log] = madmin.Status{Status: "offline"}
|
||||
logger = append(logger, mapLog)
|
||||
mapLog[tgt] = madmin.Status{Status: "offline"}
|
||||
loggerInfo = append(loggerInfo, mapLog)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for audit, l := range loggerCfg.Audit {
|
||||
if l.Enabled {
|
||||
err := checkConnection(l.Endpoint, 15*time.Second)
|
||||
for _, target := range logger.AuditTargets {
|
||||
if target.Endpoint() != "" {
|
||||
tgt := target.String()
|
||||
err := checkConnection(target.Endpoint(), 15*time.Second)
|
||||
if err == nil {
|
||||
mapAudit := make(map[string]madmin.Status)
|
||||
mapAudit[audit] = madmin.Status{Status: "Online"}
|
||||
auditlogger = append(auditlogger, mapAudit)
|
||||
mapAudit[tgt] = madmin.Status{Status: "Online"}
|
||||
auditloggerInfo = append(auditloggerInfo, mapAudit)
|
||||
} else {
|
||||
mapAudit := make(map[string]madmin.Status)
|
||||
mapAudit[audit] = madmin.Status{Status: "Offline"}
|
||||
auditlogger = append(auditlogger, mapAudit)
|
||||
mapAudit[tgt] = madmin.Status{Status: "Offline"}
|
||||
auditloggerInfo = append(auditloggerInfo, mapAudit)
|
||||
}
|
||||
}
|
||||
}
|
||||
return logger, auditlogger
|
||||
|
||||
return loggerInfo, auditloggerInfo
|
||||
}
|
||||
|
||||
// checkConnection - ping an endpoint , return err in case of no connection
|
||||
|
||||
@@ -105,8 +105,8 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
|
||||
}
|
||||
|
||||
globalPolicySys = NewPolicySys()
|
||||
objLayer := &erasureZones{zones: make([]*erasureSets, 1)}
|
||||
objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
|
||||
objLayer := &erasureServerPools{serverPools: make([]*erasureSets, 1)}
|
||||
objLayer.serverPools[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -327,13 +327,13 @@ func TestExtractHealInitParams(t *testing.T) {
|
||||
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
|
||||
v := url.Values{}
|
||||
if clientToken != "" {
|
||||
v.Add(string(mgmtClientToken), clientToken)
|
||||
v.Add(mgmtClientToken, clientToken)
|
||||
}
|
||||
if forceStart {
|
||||
v.Add(string(mgmtForceStart), "")
|
||||
v.Add(mgmtForceStart, "")
|
||||
}
|
||||
if forceStop {
|
||||
v.Add(string(mgmtForceStop), "")
|
||||
v.Add(mgmtForceStop, "")
|
||||
}
|
||||
return v
|
||||
}
|
||||
@@ -351,11 +351,11 @@ func TestExtractHealInitParams(t *testing.T) {
|
||||
}
|
||||
varsArr := []map[string]string{
|
||||
// Invalid cases
|
||||
{string(mgmtPrefix): "objprefix"},
|
||||
{mgmtPrefix: "objprefix"},
|
||||
// Valid cases
|
||||
{},
|
||||
{string(mgmtBucket): "bucket"},
|
||||
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
|
||||
{mgmtBucket: "bucket"},
|
||||
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
|
||||
}
|
||||
|
||||
// Body is always valid - we do not test JSON decoding.
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -85,7 +86,7 @@ type healSequenceStatus struct {
|
||||
|
||||
// structure to hold state of all heal sequences in server memory
|
||||
type allHealState struct {
|
||||
sync.Mutex
|
||||
sync.RWMutex
|
||||
|
||||
// map of heal path to heal sequence
|
||||
healSeqMap map[string]*healSequence
|
||||
@@ -105,21 +106,21 @@ func newHealState() *allHealState {
|
||||
}
|
||||
|
||||
func (ahs *allHealState) healDriveCount() int {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
ahs.RLock()
|
||||
defer ahs.RUnlock()
|
||||
|
||||
return len(ahs.healLocalDisks)
|
||||
}
|
||||
|
||||
func (ahs *allHealState) getHealLocalDisks() Endpoints {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
ahs.RLock()
|
||||
defer ahs.RUnlock()
|
||||
|
||||
var healLocalDisks Endpoints
|
||||
var endpoints Endpoints
|
||||
for ep := range ahs.healLocalDisks {
|
||||
healLocalDisks = append(healLocalDisks, ep)
|
||||
endpoints = append(endpoints, ep)
|
||||
}
|
||||
return healLocalDisks
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
|
||||
@@ -228,7 +229,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
|
||||
// `keepHealSeqStateDuration`. This function also launches a
|
||||
// background routine to clean up heal results after the
|
||||
// aforementioned duration.
|
||||
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
|
||||
respBytes []byte, apiErr APIError, errMsg string) {
|
||||
|
||||
if h.forceStarted {
|
||||
@@ -265,7 +266,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
ahs.healSeqMap[hpath] = h
|
||||
|
||||
// Launch top-level background heal go-routine
|
||||
go h.healSequenceStart()
|
||||
go h.healSequenceStart(objAPI)
|
||||
|
||||
clientToken := h.clientToken
|
||||
if globalIsDistErasure {
|
||||
@@ -528,7 +529,7 @@ func (h *healSequence) hasEnded() bool {
|
||||
if h.clientToken == bgHealingUUID {
|
||||
return false
|
||||
}
|
||||
return len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
|
||||
return !h.endTime.IsZero()
|
||||
}
|
||||
|
||||
// stops the heal sequence - safe to call multiple times.
|
||||
@@ -609,7 +610,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
|
||||
// routine for completion, and (2) listens for external stop
|
||||
// signals. When either event happens, it sets the finish status for
|
||||
// the heal-sequence.
|
||||
func (h *healSequence) healSequenceStart() {
|
||||
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
|
||||
// Set status as running
|
||||
h.mutex.Lock()
|
||||
h.currentStatus.Summary = healRunningStatus
|
||||
@@ -617,7 +618,7 @@ func (h *healSequence) healSequenceStart() {
|
||||
h.mutex.Unlock()
|
||||
|
||||
if h.sourceCh == nil {
|
||||
go h.traverseAndHeal()
|
||||
go h.traverseAndHeal(objAPI)
|
||||
} else {
|
||||
go h.healFromSourceCh()
|
||||
}
|
||||
@@ -657,6 +658,10 @@ func (h *healSequence) healSequenceStart() {
|
||||
}
|
||||
|
||||
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
|
||||
globalHealConfigMu.Lock()
|
||||
opts := globalHealConfig
|
||||
globalHealConfigMu.Unlock()
|
||||
|
||||
// Send heal request
|
||||
task := healTask{
|
||||
bucket: source.bucket,
|
||||
@@ -667,8 +672,15 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
|
||||
}
|
||||
if source.opts != nil {
|
||||
task.opts = *source.opts
|
||||
} else {
|
||||
if opts.Bitrot {
|
||||
task.opts.ScanMode = madmin.HealDeepScan
|
||||
}
|
||||
}
|
||||
|
||||
// Wait and proceed if there are active requests
|
||||
waitForLowHTTPReq(opts.IOCount, opts.Sleep)
|
||||
|
||||
h.mutex.Lock()
|
||||
h.scannedItemsMap[healType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
@@ -766,28 +778,23 @@ func (h *healSequence) healFromSourceCh() {
|
||||
h.healItemsFromSourceCh()
|
||||
}
|
||||
|
||||
func (h *healSequence) healDiskMeta() error {
|
||||
// Start with format healing
|
||||
if err := h.healDiskFormat(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
|
||||
// Start healing the config prefix.
|
||||
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
|
||||
if err := h.healMinioSysMeta(objAPI, minioConfigPrefix)(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start healing the bucket config prefix.
|
||||
return h.healMinioSysMeta(bucketConfigPrefix)()
|
||||
return h.healMinioSysMeta(objAPI, bucketConfigPrefix)()
|
||||
}
|
||||
|
||||
func (h *healSequence) healItems(bucketsOnly bool) error {
|
||||
if err := h.healDiskMeta(); err != nil {
|
||||
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
|
||||
if err := h.healDiskMeta(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Heal buckets and objects
|
||||
return h.healBuckets(bucketsOnly)
|
||||
return h.healBuckets(objAPI, bucketsOnly)
|
||||
}
|
||||
|
||||
// traverseAndHeal - traverses on-disk data and performs healing
|
||||
@@ -797,30 +804,29 @@ func (h *healSequence) healItems(bucketsOnly bool) error {
|
||||
// quit signal is received, this routine cannot quit immediately and
|
||||
// has to wait until a safe point is reached, such as between scanning
|
||||
// two objects.
|
||||
func (h *healSequence) traverseAndHeal() {
|
||||
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
|
||||
bucketsOnly := false // Heals buckets and objects also.
|
||||
h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
|
||||
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
|
||||
close(h.traverseAndHealDoneCh)
|
||||
}
|
||||
|
||||
// healMinioSysMeta - heals all files under a given meta prefix, returns a function
|
||||
// which in-turn heals the respective meta directory path and any files in int.
|
||||
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
|
||||
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
|
||||
return func() error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
// NOTE: Healing on meta is run regardless
|
||||
// of any bucket being selected, this is to ensure that
|
||||
// meta are always upto date and correct.
|
||||
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
|
||||
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// Skip metacache entries healing
|
||||
if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := h.queueHealTask(healSource{
|
||||
bucket: bucket,
|
||||
object: object,
|
||||
@@ -843,39 +849,27 @@ func (h *healSequence) healDiskFormat() error {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
|
||||
}
|
||||
|
||||
// healBuckets - check for all buckets heal or just particular bucket.
|
||||
func (h *healSequence) healBuckets(bucketsOnly bool) error {
|
||||
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// 1. If a bucket was specified, heal only the bucket.
|
||||
if h.bucket != "" {
|
||||
return h.healBucket(h.bucket, bucketsOnly)
|
||||
return h.healBucket(objAPI, h.bucket, bucketsOnly)
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
|
||||
buckets, err := objAPI.ListBucketsHeal(h.ctx)
|
||||
if err != nil {
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
|
||||
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -884,13 +878,7 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
|
||||
}
|
||||
|
||||
// healBucket - traverses and heals given bucket
|
||||
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
|
||||
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
|
||||
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
|
||||
return err
|
||||
@@ -905,7 +893,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
|
||||
if h.object != "" {
|
||||
// Check if an object named as the objPrefix exists,
|
||||
// and if so heal it.
|
||||
oi, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
|
||||
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
|
||||
if err == nil {
|
||||
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
|
||||
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
||||
@@ -919,7 +907,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
|
||||
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted we ignore this object an move on.
|
||||
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
|
||||
@@ -931,12 +919,6 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
|
||||
|
||||
// healObject - heal the given object and record result
|
||||
func (h *healSequence) healObject(bucket, object, versionID string) error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
|
||||
|
||||
// Add user IAM
|
||||
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))
|
||||
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
@@ -180,19 +180,19 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
|
||||
// PutBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
}
|
||||
// Bucket replication operations
|
||||
// GetBucketTargetHandler
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
|
||||
// SetRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// SetRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
|
||||
}
|
||||
|
||||
// Bucket replication operations
|
||||
// GetBucketTargetHandler
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
|
||||
// SetRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// RemoveRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
|
||||
}
|
||||
}
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistErasure {
|
||||
@@ -211,23 +211,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
if !globalIsGateway {
|
||||
// -- OBD API --
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
|
||||
Queries("perfdrive", "{perfdrive:true|false}",
|
||||
"perfnet", "{perfnet:true|false}",
|
||||
"minioinfo", "{minioinfo:true|false}",
|
||||
"minioconfig", "{minioconfig:true|false}",
|
||||
"syscpu", "{syscpu:true|false}",
|
||||
"sysdiskhw", "{sysdiskhw:true|false}",
|
||||
"sysosinfo", "{sysosinfo:true|false}",
|
||||
"sysmem", "{sysmem:true|false}",
|
||||
"sysprocess", "{sysprocess:true|false}",
|
||||
)
|
||||
// Keep obdinfo for backward compatibility with mc
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
|
||||
// -- Health API --
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
|
||||
}
|
||||
}
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
|
||||
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
|
||||
}
|
||||
|
||||
@@ -24,13 +24,13 @@ import (
|
||||
|
||||
// getLocalServerProperty - returns madmin.ServerProperties for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
|
||||
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
|
||||
addr := r.Host
|
||||
if globalIsDistErasure {
|
||||
addr = GetLocalPeer(endpointZones)
|
||||
addr = GetLocalPeer(endpointServerPools)
|
||||
}
|
||||
network := make(map[string]string)
|
||||
for _, ep := range endpointZones {
|
||||
for _, ep := range endpointServerPools {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
nodeName := endpoint.Host
|
||||
if nodeName == "" {
|
||||
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DeletedObject objects deleted
|
||||
@@ -26,12 +27,44 @@ type DeletedObject struct {
|
||||
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
|
||||
ObjectName string `xml:"Key,omitempty"`
|
||||
VersionID string `xml:"VersionId,omitempty"`
|
||||
|
||||
// MinIO extensions to support delete marker replication
|
||||
// Replication status of DeleteMarker
|
||||
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
|
||||
// MTime of DeleteMarker on source that needs to be propagated to replica
|
||||
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
|
||||
// Status of versioned delete (of object or DeleteMarker)
|
||||
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
|
||||
// PurgeTransitioned is nonempty if object is in transition tier
|
||||
PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
|
||||
}
// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
time.Time
}

// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}
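For readers skimming the diff, here is a minimal self-contained sketch of the marshaling behavior introduced above; the wrapper and field names are illustrative stand-ins, not MinIO identifiers. Embedding time.Time and overriding MarshalXML lets a zero time drop the element entirely, while a set time is emitted as RFC 3339:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
	"time"
)

// mtime mirrors the DeleteMarkerMTime idea: embed time.Time and
// customize how it is written as an XML element.
type mtime struct{ time.Time }

// MarshalXML writes nothing for a zero time, otherwise an RFC 3339 string.
func (t mtime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if t.Time.IsZero() {
		return nil
	}
	return e.EncodeElement(t.Time.Format(time.RFC3339), start)
}

// deleted is a hypothetical response entry used only for this sketch.
type deleted struct {
	Key   string `xml:"Key"`
	MTime mtime  `xml:"DeleteMarkerMTime"`
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	// Zero time: the DeleteMarkerMTime element is omitted entirely.
	_ = enc.Encode(deleted{Key: "a"})
	fmt.Println()
	// Non-zero time: encoded as an RFC 3339 timestamp element.
	_ = enc.Encode(deleted{Key: "b", MTime: mtime{time.Now().UTC()}})
	fmt.Println()
}
```

The first Encode should print something like `<deleted><Key>a</Key></deleted>`, the second the same plus a `<DeleteMarkerMTime>` element.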
|
||||
// ObjectToDelete carries key name for the object to delete.
|
||||
type ObjectToDelete struct {
|
||||
ObjectName string `xml:"Key"`
|
||||
VersionID string `xml:"VersionId"`
|
||||
// Replication status of DeleteMarker
|
||||
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
|
||||
// Status of versioned delete (of object or DeleteMarker)
|
||||
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
|
||||
// Version ID of delete marker
|
||||
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
|
||||
// PurgeTransitioned is nonempty if object is in transition tier
|
||||
PurgeTransitioned string `xml:"PurgeTransitioned"`
|
||||
}
|
||||
|
||||
// createBucketConfiguration container for bucket configuration request from client.
|
||||
|
||||
@@ -106,22 +106,25 @@ const (
|
||||
ErrNoSuchCORSConfiguration
|
||||
ErrNoSuchWebsiteConfiguration
|
||||
ErrReplicationConfigurationNotFoundError
|
||||
ErrReplicationDestinationNotFoundError
|
||||
ErrRemoteDestinationNotFoundError
|
||||
ErrReplicationDestinationMissingLock
|
||||
ErrReplicationTargetNotFoundError
|
||||
ErrRemoteTargetNotFoundError
|
||||
ErrReplicationRemoteConnectionError
|
||||
ErrBucketRemoteIdenticalToSource
|
||||
ErrBucketRemoteAlreadyExists
|
||||
ErrBucketRemoteLabelInUse
|
||||
ErrBucketRemoteArnTypeInvalid
|
||||
ErrBucketRemoteArnInvalid
|
||||
ErrBucketRemoteRemoveDisallowed
|
||||
ErrReplicationTargetNotVersionedError
|
||||
ErrRemoteTargetNotVersionedError
|
||||
ErrReplicationSourceNotVersionedError
|
||||
ErrReplicationNeedsVersioningError
|
||||
ErrReplicationBucketNeedsVersioningError
|
||||
ErrBucketReplicationDisabledError
|
||||
ErrObjectRestoreAlreadyInProgress
|
||||
ErrNoSuchKey
|
||||
ErrNoSuchUpload
|
||||
ErrInvalidVersionID
|
||||
ErrNoSuchVersion
|
||||
ErrNotImplemented
|
||||
ErrPreconditionFailed
|
||||
@@ -232,6 +235,7 @@ const (
|
||||
ErrInvalidResourceName
|
||||
ErrServerNotInitialized
|
||||
ErrOperationTimedOut
|
||||
ErrClientDisconnected
|
||||
ErrOperationMaxedOut
|
||||
ErrInvalidRequest
|
||||
// MinIO storage class error codes
|
||||
@@ -360,6 +364,7 @@ const (
|
||||
ErrInvalidDecompressedSize
|
||||
ErrAddUserInvalidArgument
|
||||
ErrAdminAccountNotEligible
|
||||
ErrAccountNotEligible
|
||||
ErrServiceAccountNotFound
|
||||
ErrPostPolicyConditionInvalidFormat
|
||||
)
|
||||
@@ -554,11 +559,16 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchVersion: {
|
||||
ErrInvalidVersionID: {
|
||||
Code: "InvalidArgument",
|
||||
Description: "Invalid version id specified",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrNoSuchVersion: {
|
||||
Code: "NoSuchVersion",
|
||||
Description: "The specified version does not exist.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNotImplemented: {
|
||||
Code: "NotImplemented",
|
||||
Description: "A header you provided implies functionality that is not implemented",
|
||||
@@ -809,9 +819,9 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The replication configuration was not found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrReplicationDestinationNotFoundError: {
|
||||
Code: "ReplicationDestinationNotFoundError",
|
||||
Description: "The replication destination bucket does not exist",
|
||||
ErrRemoteDestinationNotFoundError: {
|
||||
Code: "RemoteDestinationNotFoundError",
|
||||
Description: "The remote destination bucket does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrReplicationDestinationMissingLock: {
|
||||
@@ -819,29 +829,34 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The replication destination bucket does not have object locking enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrReplicationTargetNotFoundError: {
|
||||
Code: "XminioAdminReplicationTargetNotFoundError",
|
||||
Description: "The replication target does not exist",
|
||||
ErrRemoteTargetNotFoundError: {
|
||||
Code: "XMinioAdminRemoteTargetNotFoundError",
|
||||
Description: "The remote target does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrReplicationRemoteConnectionError: {
|
||||
Code: "XminioAdminReplicationRemoteConnectionError",
|
||||
Description: "Remote service endpoint or target bucket not available",
|
||||
Code: "XMinioAdminReplicationRemoteConnectionError",
|
||||
Description: "Remote service connection error - please check remote service credentials and target bucket",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrBucketRemoteIdenticalToSource: {
|
||||
Code: "XminioAdminRemoteIdenticalToSource",
|
||||
Code: "XMinioAdminRemoteIdenticalToSource",
|
||||
Description: "The remote target cannot be identical to source",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteAlreadyExists: {
|
||||
Code: "XminioAdminBucketRemoteAlreadyExists",
|
||||
Code: "XMinioAdminBucketRemoteAlreadyExists",
|
||||
Description: "The remote target already exists",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteLabelInUse: {
|
||||
Code: "XMinioAdminBucketRemoteLabelInUse",
|
||||
Description: "The remote target with this label already exists",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteRemoveDisallowed: {
|
||||
Code: "XMinioAdminRemoteRemoveDisallowed",
|
||||
Description: "Replication configuration exists with this ARN.",
|
||||
Description: "This ARN is in use by an existing configuration",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketRemoteArnTypeInvalid: {
|
||||
@@ -854,9 +869,9 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The bucket remote ARN does not have correct format",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrReplicationTargetNotVersionedError: {
|
||||
Code: "ReplicationTargetNotVersionedError",
|
||||
Description: "The replication target does not have versioning enabled",
|
||||
ErrRemoteTargetNotVersionedError: {
|
||||
Code: "RemoteTargetNotVersionedError",
|
||||
Description: "The remote target does not have versioning enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrReplicationSourceNotVersionedError: {
|
||||
@@ -909,6 +924,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrObjectRestoreAlreadyInProgress: {
|
||||
Code: "RestoreAlreadyInProgress",
|
||||
Description: "Object restore is already in progress",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
/// Bucket notification related errors.
|
||||
ErrEventNotification: {
|
||||
Code: "InvalidArgument",
|
||||
@@ -971,7 +991,7 @@ var errorCodes = errorCodeMap{
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMetadataTooLarge: {
|
||||
Code: "InvalidArgument",
|
||||
Code: "MetadataTooLarge",
|
||||
Description: "Your metadata headers exceed the maximum allowed metadata size.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
@@ -1210,6 +1230,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrClientDisconnected: {
|
||||
Code: "ClientDisconnected",
|
||||
Description: "Client disconnected before response was ready",
|
||||
HTTPStatusCode: 499, // No official code, use nginx value.
|
||||
},
|
||||
ErrOperationMaxedOut: {
|
||||
Code: "SlowDown",
|
||||
Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
|
||||
@@ -1708,12 +1733,17 @@ var errorCodes = errorCodeMap{
|
||||
ErrAddUserInvalidArgument: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "User is not allowed to be same as admin access key",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrAdminAccountNotEligible: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "The administrator key is not eligible for this operation",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrAccountNotEligible: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "The account key is not eligible for this operation",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrServiceAccountNotFound: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
@@ -1736,6 +1766,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
return ErrNone
}

// Only return ErrClientDisconnected if the provided context is actually canceled.
// This way downstream context.Canceled will still report ErrOperationTimedOut
select {
case <-ctx.Done():
if ctx.Err() == context.Canceled {
return ErrClientDisconnected
}
default:
}

switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
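As a side note on the hunk above: the select is a non-blocking probe of the request context, so a client that actually canceled maps to ErrClientDisconnected while a request that merely timed out keeps the existing ErrOperationTimedOut mapping. A standalone sketch of the same pattern, with illustrative names rather than MinIO's error codes:

```go
package main

import (
	"context"
	"fmt"
)

// classify returns "client-disconnected" only when ctx was actually
// canceled; otherwise it falls through without blocking.
func classify(ctx context.Context) string {
	select {
	case <-ctx.Done():
		if ctx.Err() == context.Canceled {
			return "client-disconnected"
		}
		// context.DeadlineExceeded and other causes fall through.
	default:
		// Not done: do not block, keep the normal error mapping.
	}
	return "timed-out-or-other"
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(classify(ctx)) // timed-out-or-other
	cancel()
	fmt.Println(classify(ctx)) // client-disconnected
}
```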
@@ -1852,6 +1892,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrNoSuchKey
|
||||
case MethodNotAllowed:
|
||||
apiErr = ErrMethodNotAllowed
|
||||
case InvalidVersionID:
|
||||
apiErr = ErrInvalidVersionID
|
||||
case VersionNotFound:
|
||||
apiErr = ErrNoSuchVersion
|
||||
case ObjectAlreadyExists:
|
||||
@@ -1906,24 +1948,26 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrAdminNoSuchQuotaConfiguration
|
||||
case BucketReplicationConfigNotFound:
|
||||
apiErr = ErrReplicationConfigurationNotFoundError
|
||||
case BucketReplicationDestinationNotFound:
|
||||
apiErr = ErrReplicationDestinationNotFoundError
|
||||
case BucketRemoteDestinationNotFound:
|
||||
apiErr = ErrRemoteDestinationNotFoundError
|
||||
case BucketReplicationDestinationMissingLock:
|
||||
apiErr = ErrReplicationDestinationMissingLock
|
||||
case BucketRemoteTargetNotFound:
|
||||
apiErr = ErrReplicationTargetNotFoundError
|
||||
apiErr = ErrRemoteTargetNotFoundError
|
||||
case BucketRemoteConnectionErr:
|
||||
apiErr = ErrReplicationRemoteConnectionError
|
||||
case BucketRemoteAlreadyExists:
|
||||
apiErr = ErrBucketRemoteAlreadyExists
|
||||
case BucketRemoteLabelInUse:
|
||||
apiErr = ErrBucketRemoteLabelInUse
|
||||
case BucketRemoteArnTypeInvalid:
|
||||
apiErr = ErrBucketRemoteArnTypeInvalid
|
||||
case BucketRemoteArnInvalid:
|
||||
apiErr = ErrBucketRemoteArnInvalid
|
||||
case BucketRemoteRemoveDisallowed:
|
||||
apiErr = ErrBucketRemoteRemoveDisallowed
|
||||
case BucketReplicationTargetNotVersioned:
|
||||
apiErr = ErrReplicationTargetNotVersionedError
|
||||
case BucketRemoteTargetNotVersioned:
|
||||
apiErr = ErrRemoteTargetNotVersionedError
|
||||
case BucketReplicationSourceNotVersioned:
|
||||
apiErr = ErrReplicationSourceNotVersionedError
|
||||
case BucketQuotaExceeded:
|
||||
@@ -1956,6 +2000,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrBackendDown
|
||||
case ObjectNameTooLong:
|
||||
apiErr = ErrKeyTooLongError
|
||||
case dns.ErrInvalidBucketName:
|
||||
apiErr = ErrInvalidBucketName
|
||||
default:
|
||||
var ie, iw int
|
||||
// This work-around is to handle the issue golang/go#30648
|
||||
@@ -1992,6 +2038,12 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
}
|
||||
|
||||
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
|
||||
e, ok := err.(dns.ErrInvalidBucketName)
|
||||
if ok {
|
||||
code := toAPIErrorCode(ctx, e)
|
||||
apiErr = errorCodes.ToAPIErrWithErr(code, e)
|
||||
}
|
||||
|
||||
if apiErr.Code == "InternalError" {
|
||||
// If we see an internal error try to interpret
|
||||
// any underlying errors if possible depending on
|
||||
|
||||
@@ -47,7 +47,9 @@ func setEventStreamHeaders(w http.ResponseWriter) {
|
||||
|
||||
// Write http common headers
|
||||
func setCommonHeaders(w http.ResponseWriter) {
|
||||
w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
|
||||
// Set the "Server" http header.
|
||||
w.Header().Set(xhttp.ServerInfo, "MinIO")
|
||||
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalServerRegion; region != "" {
|
||||
@@ -84,7 +86,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
|
||||
}
|
||||
|
||||
// Write object header
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
|
||||
// set common headers
|
||||
setCommonHeaders(w)
|
||||
|
||||
@@ -115,10 +117,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}

// Set tag count if object has tags
tags, _ := url.ParseQuery(objInfo.UserTags)
tagCount := len(tags)
if tagCount > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
if len(objInfo.UserTags) > 0 {
tags, _ := url.ParseQuery(objInfo.UserTags)
if len(tags) > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
}
}

// Set all other user defined metadata.
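For reference, the tag count written above falls out of url.ParseQuery on the stored tag string; a tiny standalone sketch, assuming the usual key=value&key2=value2 encoding used for object tags:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	userTags := "project=alpha&owner=build"
	if len(userTags) > 0 {
		tags, _ := url.ParseQuery(userTags)
		if len(tags) > 0 {
			// In the handler this value would go into the tag-count response header.
			fmt.Println("tag count:", strconv.Itoa(len(tags)))
		}
	}
}
```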
@@ -147,13 +150,18 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
}
|
||||
}
|
||||
|
||||
var start, rangeLen int64
|
||||
totalObjectSize, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// for providing ranged content
|
||||
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
|
||||
if opts.PartNumber > 0 {
|
||||
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
|
||||
}
|
||||
|
||||
// For providing ranged content
|
||||
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -186,6 +194,9 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
|
||||
}
|
||||
}
|
||||
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -35,11 +35,11 @@ import (
|
||||
|
||||
const (
|
||||
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
|
||||
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
|
||||
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
|
||||
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
)
|
||||
|
||||
// LocationResponse - format for location response.
|
||||
@@ -408,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
// generates ListBucketsResponse from array of BucketInfo which can be
|
||||
// serialized to match XML and JSON API spec output.
|
||||
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
var listbuckets []Bucket
|
||||
listbuckets := make([]Bucket, 0, len(buckets))
|
||||
var data = ListBucketsResponse{}
|
||||
var owner = Owner{}
|
||||
|
||||
@@ -428,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
|
||||
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
|
||||
var versions []ObjectVersion
|
||||
var prefixes []CommonPrefix
|
||||
versions := make([]ObjectVersion, 0, len(resp.Objects))
|
||||
var owner = Owner{}
|
||||
var data = ListVersionsResponse{}
|
||||
|
||||
@@ -473,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
data.VersionIDMarker = versionIDMarker
|
||||
data.IsTruncated = resp.IsTruncated
|
||||
|
||||
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
@@ -484,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
var contents []Object
|
||||
var prefixes []CommonPrefix
|
||||
contents := make([]Object, 0, len(resp.Objects))
|
||||
var owner = Owner{}
|
||||
var data = ListObjectsResponse{}
|
||||
|
||||
@@ -517,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
data.Marker = s3EncodeName(marker, encodingType)
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
|
||||
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
|
||||
data.IsTruncated = resp.IsTruncated
|
||||
|
||||
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
@@ -531,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
|
||||
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
|
||||
var contents []Object
|
||||
var commonPrefixes []CommonPrefix
|
||||
contents := make([]Object, 0, len(objects))
|
||||
var owner = Owner{}
|
||||
var data = ListObjectsV2Response{}
|
||||
|
||||
@@ -585,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
|
||||
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
|
||||
data.IsTruncated = isTruncated
|
||||
|
||||
commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
|
||||
for _, prefix := range prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
@@ -702,10 +703,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
|
||||
}
|
||||
|
||||
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
|
||||
if newObjectLayerFn() == nil {
|
||||
// Server still in safe mode.
|
||||
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
|
||||
}
|
||||
setCommonHeaders(w)
|
||||
if mType != mimeNone {
|
||||
w.Header().Set(xhttp.ContentType, string(mType))
|
||||
@@ -772,10 +769,6 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
||||
// The request is from browser and also if browser
|
||||
// is enabled we need to redirect.
|
||||
if browser && globalBrowserEnabled {
|
||||
if newObjectLayerFn() == nil {
|
||||
// server still in safe mode.
|
||||
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
|
||||
}
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
|
||||
@@ -32,39 +32,28 @@ func newHTTPServerFn() *xhttp.Server {
|
||||
return globalHTTPServer
|
||||
}
|
||||
|
||||
func newObjectLayerWithoutSafeModeFn() ObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
return globalObjectAPI
|
||||
}
|
||||
|
||||
func newObjectLayerFn() ObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
if globalSafeMode {
|
||||
return nil
|
||||
}
|
||||
return globalObjectAPI
|
||||
}
|
||||
|
||||
func newCachedObjectLayerFn() CacheObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
|
||||
if globalSafeMode {
|
||||
return nil
|
||||
}
|
||||
return globalCacheObjectAPI
|
||||
}
|
||||
|
||||
func setObjectLayer(o ObjectLayer) {
|
||||
globalObjLayerMutex.Lock()
|
||||
globalObjectAPI = o
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
||||
|
||||
// objectAPIHandler implements and provides http handlers for S3 API.
|
||||
type objectAPIHandlers struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
CacheAPI func() CacheObjectLayer
|
||||
// Returns true of handlers should interpret encryption.
|
||||
EncryptionEnabled func() bool
|
||||
// Returns true if handlers allow SSE-KMS encryption headers.
|
||||
AllowSSEKMS func() bool
|
||||
}
|
||||
|
||||
// getHost tries its best to return the request host.
|
||||
@@ -78,17 +67,11 @@ func getHost(r *http.Request) string {
|
||||
}
|
||||
|
||||
// registerAPIRouter - registers S3 compatible APIs.
|
||||
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
|
||||
func registerAPIRouter(router *mux.Router) {
|
||||
// Initialize API.
|
||||
api := objectAPIHandlers{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
CacheAPI: newCachedObjectLayerFn,
|
||||
EncryptionEnabled: func() bool {
|
||||
return encryptionEnabled
|
||||
},
|
||||
AllowSSEKMS: func() bool {
|
||||
return allowSSEKMS
|
||||
},
|
||||
}
|
||||
|
||||
// API Router
|
||||
@@ -98,7 +81,10 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
for _, domainName := range globalDomainNames {
|
||||
if IsKubernetes() {
|
||||
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
|
||||
host, _, _ := net.SplitHostPort(getHost(r))
|
||||
host, _, err := net.SplitHostPort(getHost(r))
|
||||
if err != nil {
|
||||
host = r.Host
|
||||
}
|
||||
// Make sure to skip matching minio.<domain>` this is
|
||||
// specifically meant for operator/k8s deployment
|
||||
// The reason we need to skip this is for a special
|
||||
@@ -321,6 +307,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
// PostRestoreObject
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("restoreobject", httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
|
||||
}
|
||||
|
||||
/// Root operation
|
||||
@@ -339,8 +328,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
|
||||
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -333,8 +333,12 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// Populate payload again to handle it in HTTP handler.
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
|
||||
}
|
||||
if cred.AccessKey != "" {
|
||||
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
|
||||
}
|
||||
|
||||
if cred.AccessKey == "" {
|
||||
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
|
||||
// Anonymous checks are not meant for ListBuckets action
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: action,
|
||||
@@ -378,12 +382,13 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
|
||||
if action == policy.ListBucketVersionsAction {
|
||||
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
|
||||
// verify as a fallback.
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.Action(policy.ListBucketAction),
|
||||
Action: iampolicy.ListBucketAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
ObjectName: objectName,
|
||||
@@ -554,7 +559,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
if retMode == objectlock.RetGovernance && byPassSet {
|
||||
byPassSet = globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.Action(policy.BypassGovernanceRetentionAction),
|
||||
Action: policy.BypassGovernanceRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
IsOwner: false,
|
||||
@@ -563,7 +568,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
}
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.Action(policy.PutObjectRetentionAction),
|
||||
Action: policy.PutObjectRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
IsOwner: false,
|
||||
@@ -586,7 +591,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
if retMode == objectlock.RetGovernance && byPassSet {
|
||||
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.BypassGovernanceRetentionAction,
|
||||
Action: iampolicy.BypassGovernanceRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ObjectName: objectName,
|
||||
ConditionValues: conditions,
|
||||
@@ -596,7 +601,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
}
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectRetentionAction,
|
||||
Action: iampolicy.PutObjectRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
ObjectName: objectName,
|
||||
@@ -614,7 +619,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
|
||||
// call verifies bucket policies and IAM policies, supports multi user
|
||||
// checks etc.
|
||||
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
|
||||
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
switch atype {
|
||||
@@ -635,6 +640,10 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
|
||||
return s3Err
|
||||
}
|
||||
|
||||
if cred.AccessKey != "" {
|
||||
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
|
||||
}
|
||||
|
||||
// Do not check for PutObjectRetentionAction permission,
|
||||
// if mode and retain until date are not set.
|
||||
// Can happen when bucket has default lock config set
|
||||
|
||||
@@ -18,7 +18,6 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -55,20 +54,33 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}

func waitForLowHTTPReq(tolerance int32, maxWait time.Duration) {
const wait = 10 * time.Millisecond
waitCount := maxWait / wait
func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
// No need to wait run at full speed.
if maxIO <= 0 {
return
}

// At max 10 attempts to wait with 100 millisecond interval before proceeding
waitCount := 10
waitTick := 100 * time.Millisecond

// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
tolerance += int32(globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers())
maxIOFn := func() int {
return maxIO + globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers()
}

if httpServer := newHTTPServerFn(); httpServer != nil {
// Any requests in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
for httpServer.GetRequestCount() >= maxIOFn() {
time.Sleep(waitTick)
waitCount--
time.Sleep(wait)
if waitCount == 0 {
if intDataUpdateTracker.debug {
logger.Info("waitForLowHTTPReq: waited %d times, resuming", waitCount)
}
break
}
}
}
}
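The rewrite above turns the wait into a capped poll: check an in-flight request counter, sleep a fixed tick, and resume after a bounded number of attempts so healing never stalls forever. A self-contained sketch of that shape, where the counter callback stands in for the server's request count (names and numbers here are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// waitForLowLoad blocks while inFlight() exceeds maxIO, checking at a
// fixed tick, but never waits more than maxAttempts ticks.
func waitForLowLoad(inFlight func() int, maxIO, maxAttempts int, tick time.Duration) {
	if maxIO <= 0 {
		return // nothing to throttle against, run at full speed
	}
	for attempts := 0; inFlight() > maxIO; attempts++ {
		if attempts >= maxAttempts {
			fmt.Printf("waited %d times, resuming anyway\n", attempts)
			return
		}
		time.Sleep(tick)
	}
}

func main() {
	busyUntil := time.Now().Add(300 * time.Millisecond)
	load := func() int {
		if time.Now().Before(busyUntil) {
			return 25 // simulated burst of in-flight requests
		}
		return 2
	}
	waitForLowLoad(load, 10, 10, 100*time.Millisecond)
	fmt.Println("proceeding with background work")
}
```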
@@ -82,9 +94,6 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
|
||||
break
|
||||
}
|
||||
|
||||
// Wait and proceed if there are active requests
|
||||
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)
|
||||
|
||||
var res madmin.HealResultItem
|
||||
var err error
|
||||
switch {
|
||||
@@ -97,9 +106,6 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
|
||||
case task.bucket != "" && task.object != "":
|
||||
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
|
||||
}
|
||||
if task.bucket != "" && task.object != "" {
|
||||
ObjectPathUpdated(path.Join(task.bucket, task.object))
|
||||
}
|
||||
task.responseCh <- healResult{result: res, err: err}
|
||||
|
||||
case <-h.doneCh:
|
||||
@@ -129,24 +135,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
|
||||
return madmin.HealResultItem{}, err
|
||||
}
|
||||
|
||||
// Healing succeeded notify the peers to reload format and re-initialize disks.
|
||||
// We will not notify peers if healing is not required.
|
||||
if err == nil {
|
||||
// Notify servers in background and retry if needed.
|
||||
go func() {
|
||||
retry:
|
||||
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
|
||||
if nerr.Err != nil {
|
||||
if nerr.Err.Error() == errServerNotInitialized.Error() {
|
||||
time.Sleep(time.Second)
|
||||
goto retry
|
||||
}
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@@ -26,10 +26,20 @@ import (
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
const defaultMonitorNewDiskInterval = time.Second * 10
|
||||
const (
|
||||
defaultMonitorNewDiskInterval = time.Second * 10
|
||||
healingTrackerFilename = ".healing.bin"
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE -unexported
|
||||
type healingTracker struct {
|
||||
ID string
|
||||
|
||||
// future add more tracking capabilities
|
||||
}
|
||||
|
||||
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
z, ok := objAPI.(*erasureZones)
|
||||
z, ok := objAPI.(*erasureServerPools)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
@@ -47,16 +57,15 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
for _, ep := range getLocalDisksToHeal() {
|
||||
globalBackgroundHealState.pushHealLocalDisks(ep)
|
||||
}
|
||||
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
|
||||
|
||||
if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
|
||||
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
|
||||
drivesToHeal, defaultMonitorNewDiskInterval))
|
||||
|
||||
// Heal any disk format and metadata early, if possible.
|
||||
if err := bgSeq.healDiskMeta(); err != nil {
|
||||
// Start with format healing
|
||||
if err := bgSeq.healDiskFormat(); err != nil {
|
||||
if newObjectLayerFn() != nil {
|
||||
// log only in situations, when object layer
|
||||
// has fully initialized.
|
||||
@@ -65,6 +74,14 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := bgSeq.healDiskMeta(objAPI); err != nil {
|
||||
if newObjectLayerFn() != nil {
|
||||
// log only in situations, when object layer
|
||||
// has fully initialized.
|
||||
logger.LogIf(bgSeq.ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
go monitorLocalDisksAndHeal(ctx, z, bgSeq)
|
||||
}
|
||||
|
||||
@@ -76,9 +93,11 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
|
||||
}
|
||||
// Try to connect to the current endpoint
|
||||
// and reformat if the current disk is not formatted
|
||||
_, _, err := connectEndpoint(endpoint)
|
||||
disk, _, err := connectEndpoint(endpoint)
|
||||
if errors.Is(err, errUnformattedDisk) {
|
||||
disksToHeal = append(disksToHeal, endpoint)
|
||||
} else if err == nil && disk != nil && disk.Healing() {
|
||||
disksToHeal = append(disksToHeal, disk.Endpoint())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -91,71 +110,108 @@ func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
|
||||
globalBackgroundHealRoutine = newHealRoutine()
|
||||
go globalBackgroundHealRoutine.run(ctx, objAPI)
|
||||
|
||||
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence())
|
||||
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
|
||||
}
|
||||
|
||||
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
|
||||
// 1. Only the concerned erasure set will be listed and healed
|
||||
// 2. Only the node hosting the disk is responsible to perform the heal
|
||||
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healSequence) {
|
||||
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
|
||||
// Perform automatic disk healing when a disk is replaced locally.
|
||||
wait:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-time.After(defaultMonitorNewDiskInterval):
|
||||
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)
|
||||
|
||||
var erasureSetInZoneEndpointToHeal = make([]map[int]Endpoint, len(z.zones))
|
||||
for i := range z.zones {
|
||||
erasureSetInZoneEndpointToHeal[i] = map[int]Endpoint{}
|
||||
}
|
||||
var erasureSetInZoneDisksToHeal []map[int][]StorageAPI
|
||||
|
||||
healDisks := globalBackgroundHealState.getHealLocalDisks()
|
||||
// heal only if new disks found.
|
||||
for _, endpoint := range healDisks {
|
||||
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
|
||||
len(healDisks)))
|
||||
|
||||
if len(healDisks) > 0 {
|
||||
// Reformat disks
|
||||
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
|
||||
|
||||
// Ensure that reformatting disks is finished
|
||||
bgSeq.sourceCh <- healSource{bucket: nopHeal}
|
||||
|
||||
// Load the new format of this passed endpoint
|
||||
_, format, err := connectEndpoint(endpoint)
|
||||
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
|
||||
len(healDisks)))
|
||||
|
||||
erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
|
||||
for i := range z.serverPools {
|
||||
erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
|
||||
}
|
||||
}
|
||||
|
||||
// heal only if new disks found.
|
||||
for _, endpoint := range healDisks {
|
||||
disk, format, err := connectEndpoint(endpoint)
|
||||
if err != nil {
|
||||
printEndpointError(endpoint, err, true)
|
||||
continue
|
||||
}
|
||||
|
||||
zoneIdx := globalEndpoints.GetLocalZoneIdx(endpoint)
|
||||
zoneIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
|
||||
if zoneIdx < 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Calculate the set index where the current endpoint belongs
|
||||
setIndex, _, err := findDiskIndex(z.zones[zoneIdx].format, format)
|
||||
z.serverPools[zoneIdx].erasureDisksMu.RLock()
|
||||
// Protect reading reference format.
|
||||
setIndex, _, err := findDiskIndex(z.serverPools[zoneIdx].format, format)
|
||||
z.serverPools[zoneIdx].erasureDisksMu.RUnlock()
|
||||
if err != nil {
|
||||
printEndpointError(endpoint, err, false)
|
||||
continue
|
||||
}
|
||||
|
||||
erasureSetInZoneEndpointToHeal[zoneIdx][setIndex] = endpoint
|
||||
erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
|
||||
}
|
||||
|
||||
for i, setMap := range erasureSetInZoneEndpointToHeal {
|
||||
for setIndex, endpoint := range setMap {
|
||||
logger.Info("Healing disk '%s' on %s zone", endpoint, humanize.Ordinal(i+1))
|
||||
buckets, _ := z.ListBucketsHeal(ctx)
|
||||
for i, setMap := range erasureSetInZoneDisksToHeal {
|
||||
for setIndex, disks := range setMap {
|
||||
for _, disk := range disks {
|
||||
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
|
||||
|
||||
if err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].setDriveCount); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
// So someone changed the drives underneath, healing tracker missing.
|
||||
if !disk.Healing() {
|
||||
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s zone", disk, humanize.Ordinal(i+1))
|
||||
diskID, err := disk.GetDiskID()
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
// reading format.json failed or not found, proceed to look
|
||||
// for new disks to be healed again, we cannot proceed further.
|
||||
goto wait
|
||||
}
|
||||
|
||||
if err := saveHealingTracker(disk, diskID); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
// Unable to write healing tracker, permission denied or some
|
||||
// other unexpected error occurred. Proceed to look for new
|
||||
// disks to be healed again, we cannot proceed further.
|
||||
goto wait
|
||||
}
|
||||
}
|
||||
|
||||
lbDisks := z.serverPools[i].sets[setIndex].getOnlineDisks()
|
||||
if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Info("Healing disk '%s' on %s zone complete", disk, humanize.Ordinal(i+1))
|
||||
|
||||
if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
|
||||
healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Only upon success pop the healed disk.
|
||||
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
|
||||
}
|
||||
|
||||
// Only upon success pop the healed disk.
|
||||
globalBackgroundHealState.popHealLocalDisks(endpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cmd/background-newdisks-heal-ops_gen.go (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "ID":
|
||||
z.ID, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ID")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 1
|
||||
// write "ID"
|
||||
err = en.Append(0x81, 0xa2, 0x49, 0x44)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.ID)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ID")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 1
|
||||
// string "ID"
|
||||
o = append(o, 0x81, 0xa2, 0x49, 0x44)
|
||||
o = msgp.AppendString(o, z.ID)
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "ID":
|
||||
z.ID, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ID")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z healingTracker) Msgsize() (s int) {
|
||||
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
|
||||
return
|
||||
}
|
||||
cmd/background-newdisks-heal-ops_gen_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
func TestMarshalUnmarshalhealingTracker(t *testing.T) {
|
||||
v := healingTracker{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsghealingTracker(b *testing.B) {
|
||||
v := healingTracker{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsghealingTracker(b *testing.B) {
|
||||
v := healingTracker{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalhealingTracker(b *testing.B) {
|
||||
v := healingTracker{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodehealingTracker(t *testing.T) {
|
||||
v := healingTracker{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := healingTracker{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodehealingTracker(b *testing.B) {
|
||||
v := healingTracker{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodehealingTracker(b *testing.B) {
|
||||
v := healingTracker{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -140,8 +140,8 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
err := &errHashMismatch{fmt.Sprintf("Disk: %s - content hash does not match - expected %s, got %s",
|
||||
b.disk, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
|
||||
err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
|
||||
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
|
||||
logger.LogIf(GlobalContext, err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -71,12 +71,12 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
	if b.buf == nil {
		b.buf = make([]byte, b.tillOffset-offset)
		if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
			logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
			logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
			return 0, err
		}
	}
	if len(b.buf) < len(buf) {
		logger.LogIf(GlobalContext, errLessData)
		logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
		return 0, errLessData
	}
	n = copy(buf, b.buf)

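Both bitrot reader changes only enrich the error message with the volume and file path; the verification itself stays the same: re-hash the block that was read and compare the digest with the checksum captured at write time. A minimal standalone sketch of that check (SHA-256 here is purely illustrative and not necessarily the hash the bitrot verifier uses):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyBlock re-hashes a block read from disk and compares it against the
// checksum recorded when the block was written; a mismatch indicates bitrot.
func verifyBlock(block, expectedSum []byte) error {
	h := sha256.New()
	h.Write(block)
	if !bytes.Equal(h.Sum(nil), expectedSum) {
		return fmt.Errorf("content hash does not match - expected %x, got %x", expectedSum, h.Sum(nil))
	}
	return nil
}

func main() {
	data := []byte("some block contents")
	sum := sha256.Sum256(data)
	fmt.Println(verifyBlock(data, sum[:]))                // <nil>
	fmt.Println(verifyBlock([]byte("corrupted"), sum[:])) // hash mismatch error
}
```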
@@ -18,9 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -54,7 +52,7 @@ type bootstrapRESTServer struct{}
|
||||
type ServerSystemConfig struct {
|
||||
MinioPlatform string
|
||||
MinioRuntime string
|
||||
MinioEndpoints EndpointZones
|
||||
MinioEndpoints EndpointServerPools
|
||||
}
|
||||
|
||||
// Diff - returns error on first difference found in two configs.
|
||||
@@ -161,9 +159,9 @@ func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSyst
|
||||
return srcCfg.Diff(recvCfg)
|
||||
}
|
||||
|
||||
func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones) error {
|
||||
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools) error {
|
||||
srcCfg := getServerSystemCfg()
|
||||
clnts := newBootstrapRESTClients(endpointZones)
|
||||
clnts := newBootstrapRESTClients(endpointServerPools)
|
||||
var onlineServers int
|
||||
var offlineEndpoints []string
|
||||
var retries int
|
||||
@@ -188,7 +186,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
|
||||
retries++
|
||||
// after 5 retries start logging that servers are not reachable yet
|
||||
if retries >= 5 {
|
||||
logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
|
||||
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
|
||||
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
|
||||
retries = 0 // reset to log again after 5 retries.
|
||||
}
|
||||
@@ -198,10 +196,10 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient {
|
||||
func newBootstrapRESTClients(endpointServerPools EndpointServerPools) []*bootstrapRESTClient {
|
||||
seenHosts := set.NewStringSet()
|
||||
var clnts []*bootstrapRESTClient
|
||||
for _, ep := range endpointZones {
|
||||
for _, ep := range endpointServerPools {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
if seenHosts.Contains(endpoint.Host) {
|
||||
continue
|
||||
@@ -225,26 +223,8 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
|
||||
Path: bootstrapRESTPath,
|
||||
}
|
||||
|
||||
var tlsConfig *tls.Config
|
||||
if globalIsSSL {
|
||||
tlsConfig = &tls.Config{
|
||||
ServerName: endpoint.Hostname(),
|
||||
RootCAs: globalRootCAs,
|
||||
}
|
||||
}
|
||||
|
||||
trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
|
||||
restClient := rest.NewClient(serverURL, trFn, newAuthToken)
|
||||
restClient.HealthCheckFn = func() bool {
|
||||
ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
|
||||
// Instantiate a new rest client for healthcheck
|
||||
// to avoid recursive healthCheckFn()
|
||||
respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).Call(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
|
||||
xhttp.DrainBody(respBody)
|
||||
cancel()
|
||||
var ne *rest.NetworkError
|
||||
return !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &ne)
|
||||
}
|
||||
restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
|
||||
restClient.HealthCheckFn = nil
|
||||
|
||||
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
|
||||
// Get - gets bucket encryption config for the given bucket.
|
||||
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
@@ -37,6 +38,7 @@ import (
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
@@ -383,6 +385,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
return
|
||||
}
|
||||
|
||||
// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
|
||||
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
|
||||
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objectAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
@@ -400,7 +406,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
if api.CacheAPI() != nil {
|
||||
getObjectInfoFn = api.CacheAPI().GetObjectInfo
|
||||
}
|
||||
|
||||
var (
|
||||
hasLockEnabled, hasLifecycleConfig bool
|
||||
goi ObjectInfo
|
||||
gerr error
|
||||
)
|
||||
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
hasLockEnabled = true
|
||||
}
|
||||
if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
|
||||
hasLifecycleConfig = true
|
||||
}
|
||||
dErrs := make([]DeleteError, len(deleteObjects.Objects))
|
||||
for index, object := range deleteObjects.Objects {
|
||||
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
|
||||
@@ -417,10 +434,47 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
}
|
||||
continue
|
||||
}
|
||||
if object.VersionID != "" && object.VersionID != nullVersionID {
|
||||
if _, err := uuid.Parse(object.VersionID); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
|
||||
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
|
||||
dErrs[index] = DeleteError{
|
||||
Code: apiErr.Code,
|
||||
Message: apiErr.Description,
|
||||
Key: object.ObjectName,
|
||||
VersionID: object.VersionID,
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
|
||||
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, ObjectOptions{
|
||||
VersionID: object.VersionID,
|
||||
})
|
||||
}
|
||||
if hasLifecycleConfig && gerr == nil {
|
||||
object.PurgeTransitioned = goi.TransitionStatus
|
||||
}
|
||||
if replicateDeletes {
|
||||
delMarker, replicate := checkReplicateDelete(ctx, bucket, ObjectToDelete{
|
||||
ObjectName: object.ObjectName,
|
||||
VersionID: object.VersionID,
|
||||
}, goi, gerr)
|
||||
if replicate {
|
||||
if object.VersionID != "" {
|
||||
object.VersionPurgeStatus = Pending
|
||||
if delMarker {
|
||||
object.DeleteMarkerVersionID = object.VersionID
|
||||
}
|
||||
} else {
|
||||
object.DeleteMarkerReplicationStatus = string(replication.Pending)
|
||||
}
|
||||
}
|
||||
}
|
||||
if object.VersionID != "" {
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone {
|
||||
if hasLockEnabled {
|
||||
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
|
||||
apiErr := errorCodes.ToAPIErr(apiErrCode)
|
||||
dErrs[index] = DeleteError{
|
||||
Code: apiErr.Code,
|
||||
@@ -454,15 +508,25 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
Versioned: globalBucketVersioningSys.Enabled(bucket),
|
||||
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
|
||||
})
|
||||
|
||||
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
|
||||
for i := range errs {
|
||||
dindex := objectsToDelete[deleteList[i]]
|
||||
apiErr := toAPIError(ctx, errs[i])
|
||||
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" || apiErr.Code == "InvalidArgument" {
|
||||
dindex := objectsToDelete[ObjectToDelete{
|
||||
ObjectName: dObjects[i].ObjectName,
|
||||
VersionID: dObjects[i].VersionID,
|
||||
DeleteMarkerVersionID: dObjects[i].DeleteMarkerVersionID,
|
||||
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
|
||||
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
|
||||
PurgeTransitioned: dObjects[i].PurgeTransitioned,
|
||||
}]
|
||||
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
|
||||
if replicateDeletes {
|
||||
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
|
||||
dObjects[i].VersionPurgeStatus = deleteList[i].VersionPurgeStatus
|
||||
}
|
||||
deletedObjects[dindex] = dObjects[i]
|
||||
continue
|
||||
}
|
||||
apiErr := toAPIError(ctx, errs[i])
|
||||
dErrs[dindex] = DeleteError{
|
||||
Code: apiErr.Code,
|
||||
Message: apiErr.Description,
|
||||
@@ -484,22 +548,46 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseXML(w, encodedSuccessResponse)
|
||||
for _, dobj := range deletedObjects {
|
||||
if replicateDeletes {
|
||||
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
|
||||
globalReplicationState.queueReplicaDeleteTask(DeletedObjectVersionInfo{
|
||||
DeletedObject: dobj,
|
||||
Bucket: bucket,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
|
||||
action := lifecycle.DeleteAction
|
||||
if dobj.VersionID != "" {
|
||||
action = lifecycle.DeleteVersionAction
|
||||
}
|
||||
deleteTransitionedObject(ctx, newObjectLayerFn(), bucket, dobj.ObjectName, lifecycle.ObjectOpts{
|
||||
Name: dobj.ObjectName,
|
||||
VersionID: dobj.VersionID,
|
||||
DeleteMarker: dobj.DeleteMarker,
|
||||
}, action, true)
|
||||
}
|
||||
|
||||
}
|
||||
// Notify deleted event for objects.
|
||||
for _, dobj := range deletedObjects {
|
||||
eventName := event.ObjectRemovedDelete
|
||||
|
||||
objInfo := ObjectInfo{
|
||||
Name: dobj.ObjectName,
|
||||
VersionID: dobj.VersionID,
|
||||
}
|
||||
|
||||
if dobj.DeleteMarker {
|
||||
objInfo = ObjectInfo{
|
||||
Name: dobj.ObjectName,
|
||||
DeleteMarker: dobj.DeleteMarker,
|
||||
VersionID: dobj.DeleteMarkerVersionID,
|
||||
}
|
||||
objInfo.DeleteMarker = dobj.DeleteMarker
|
||||
objInfo.VersionID = dobj.DeleteMarkerVersionID
|
||||
eventName = event.ObjectRemovedDeleteMarkerCreated
|
||||
}
|
||||
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectRemovedDelete,
|
||||
EventName: eventName,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
@@ -659,16 +747,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {
|
||||
|
||||
if !objectAPI.IsEncryptionSupported() && crypto.IsRequested(r.Header) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
bucket := mux.Vars(r)["bucket"]
|
||||
|
||||
// To detect if the client has disconnected.
|
||||
r.Body = &contextReader{r.Body, r.Context()}
|
||||
|
||||
// Require Content-Length to be set in the request
|
||||
size := r.ContentLength
|
||||
if size < 0 {
|
||||
@@ -1291,7 +1377,6 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
|
||||
@@ -19,7 +19,6 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -837,7 +836,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
|
||||
// Verify whether the bucket obtained object is same as the one created.
|
||||
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
|
||||
fmt.Println(string(testCase.expectedContent), string(actualContent))
|
||||
t.Log(string(testCase.expectedContent), string(actualContent))
|
||||
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -78,6 +78,12 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
|
||||
return
|
||||
}
|
||||
|
||||
// Validate the transition storage ARNs
|
||||
if err = validateLifecycleTransition(ctx, bucket, bucketLifecycle); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(bucketLifecycle)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
|
||||
@@ -17,7 +17,26 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
miniogo "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
sse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/s3select"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -31,7 +50,7 @@ type LifecycleSys struct{}
|
||||
// Get - gets lifecycle config associated to a given bucket name.
|
||||
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
@@ -46,3 +65,591 @@ func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err er
|
||||
func NewLifecycleSys() *LifecycleSys {
|
||||
return &LifecycleSys{}
|
||||
}
|
||||
|
||||
type transitionState struct {
	// add future metrics here
	transitionCh chan ObjectInfo
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
	select {
	case t.transitionCh <- oi:
	default:
	}
}

var (
	globalTransitionState      *transitionState
	globalTransitionConcurrent = runtime.GOMAXPROCS(0) / 2
)

func newTransitionState() *transitionState {

	// fix minimum concurrent transition to 1 for single CPU setup
	if globalTransitionConcurrent == 0 {
		globalTransitionConcurrent = 1
	}
	ts := &transitionState{
		transitionCh: make(chan ObjectInfo, 10000),
	}
	go func() {
		<-GlobalContext.Done()
		close(ts.transitionCh)
	}()
	return ts
}

// addWorker creates a new worker to process tasks
func (t *transitionState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
	// Add a new worker.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case oi, ok := <-t.transitionCh:
				if !ok {
					return
				}
				if err := transitionObject(ctx, objectAPI, oi); err != nil {
					logger.LogIf(ctx, err)
				}
			}
		}
	}()
}

func initBackgroundTransition(ctx context.Context, objectAPI ObjectLayer) {
	if globalTransitionState == nil {
		return
	}

	// Start with globalTransitionConcurrent.
	for i := 0; i < globalTransitionConcurrent; i++ {
		globalTransitionState.addWorker(ctx, objectAPI)
	}
}

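Taken together this is a small worker pool: a bounded channel of pending objects, a non-blocking enqueue (queueTransitionTask drops the task when the channel is full), and roughly GOMAXPROCS/2 workers draining it until the context is done or the channel is closed. A self-contained sketch of the same shape with hypothetical names; it illustrates the pattern and is not the MinIO implementation:

```go
package main

import (
	"context"
	"fmt"
	"runtime"
	"sync"
)

type task struct{ name string }

type pool struct{ ch chan task }

// enqueue is non-blocking: when the buffer is full the task is silently
// dropped, mirroring queueTransitionTask's select with a default case.
func (p *pool) enqueue(t task) {
	select {
	case p.ch <- t:
	default:
	}
}

func main() {
	workers := runtime.GOMAXPROCS(0) / 2
	if workers == 0 {
		workers = 1 // same single-CPU floor as newTransitionState
	}
	p := &pool{ch: make(chan task, 16)}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case t, ok := <-p.ch:
					if !ok {
						return // channel closed, queue drained
					}
					fmt.Printf("worker %d transitioning %s\n", id, t.name)
				}
			}
		}(i)
	}

	p.enqueue(task{name: "my-bucket/photo.jpg"})
	p.enqueue(task{name: "my-bucket/report.pdf"})
	close(p.ch) // let the workers drain the queue and exit
	wg.Wait()
}
```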
func validateLifecycleTransition(ctx context.Context, bucket string, lfc *lifecycle.Lifecycle) error {
|
||||
for _, rule := range lfc.Rules {
|
||||
if rule.Transition.StorageClass != "" {
|
||||
sameTarget, destbucket, err := validateTransitionDestination(ctx, bucket, rule.Transition.StorageClass)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sameTarget && destbucket == bucket {
|
||||
return fmt.Errorf("Transition destination cannot be the same as the source bucket")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateTransitionDestination returns error if transition destination bucket missing or not configured
|
||||
// It also returns true if transition destination is same as this server.
|
||||
func validateTransitionDestination(ctx context.Context, bucket string, targetLabel string) (bool, string, error) {
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetWithLabel(ctx, bucket, targetLabel)
|
||||
if tgt == nil {
|
||||
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
|
||||
}
|
||||
arn, err := madmin.ParseARN(tgt.Arn)
|
||||
if err != nil {
|
||||
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
|
||||
}
|
||||
if arn.Type != madmin.ILMService {
|
||||
return false, "", BucketRemoteArnTypeInvalid{}
|
||||
}
|
||||
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgt.Arn)
|
||||
if clnt == nil {
|
||||
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
|
||||
}
|
||||
if found, _ := clnt.BucketExists(ctx, arn.Bucket); !found {
|
||||
return false, "", BucketRemoteDestinationNotFound{Bucket: arn.Bucket}
|
||||
}
|
||||
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
|
||||
return sameTarget, arn.Bucket, nil
|
||||
}
|
||||
|
||||
// transitionSC returns storage class label for this bucket
|
||||
func transitionSC(ctx context.Context, bucket string) string {
|
||||
cfg, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
for _, rule := range cfg.Rules {
|
||||
if rule.Status == Disabled {
|
||||
continue
|
||||
}
|
||||
if rule.Transition.StorageClass != "" {
|
||||
return rule.Transition.StorageClass
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// return true if ARN representing transition storage class is present in a active rule
|
||||
// for the lifecycle configured on this bucket
|
||||
func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, arnStr string) bool {
|
||||
tgtLabel := globalBucketTargetSys.GetRemoteLabelWithArn(ctx, bucket, arnStr)
|
||||
if tgtLabel == "" {
|
||||
return false
|
||||
}
|
||||
for _, rule := range lfc.Rules {
|
||||
if rule.Status == Disabled {
|
||||
continue
|
||||
}
|
||||
if rule.Transition.StorageClass != "" && rule.Transition.StorageClass == tgtLabel {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// set PutObjectOptions for PUT operation to transition data to target cluster
|
||||
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
|
||||
meta := make(map[string]string)
|
||||
|
||||
tag, err := tags.ParseObjectTags(objInfo.UserTags)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
putOpts = miniogo.PutObjectOptions{
|
||||
UserMetadata: meta,
|
||||
UserTags: tag.ToMap(),
|
||||
ContentType: objInfo.ContentType,
|
||||
ContentEncoding: objInfo.ContentEncoding,
|
||||
StorageClass: objInfo.StorageClass,
|
||||
Internal: miniogo.AdvancedPutOptions{
|
||||
SourceVersionID: objInfo.VersionID,
|
||||
SourceMTime: objInfo.ModTime,
|
||||
SourceETag: objInfo.ETag,
|
||||
},
|
||||
}
|
||||
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
|
||||
rmode := miniogo.RetentionMode(mode)
|
||||
putOpts.Mode = rmode
|
||||
}
|
||||
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
|
||||
rdate, err := time.Parse(time.RFC3339, retainDateStr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
putOpts.RetainUntilDate = rdate
|
||||
}
|
||||
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
|
||||
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// handle deletes of transitioned objects or object versions when one of the following is true:
|
||||
// 1. temporarily restored copies of objects (restored with the PostRestoreObject API) expired.
|
||||
// 2. life cycle expiry date is met on the object.
|
||||
// 3. Object is removed through DELETE api call
|
||||
func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, action lifecycle.Action, isDeleteTierOnly bool) error {
|
||||
if lcOpts.TransitionStatus == "" && !isDeleteTierOnly {
|
||||
return nil
|
||||
}
|
||||
lc, err := globalLifecycleSys.Get(bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lcOpts)
|
||||
if arn == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
|
||||
if tgt == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
|
||||
var opts ObjectOptions
|
||||
opts.Versioned = globalBucketVersioningSys.Enabled(bucket)
|
||||
opts.VersionID = lcOpts.VersionID
|
||||
switch action {
|
||||
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
|
||||
// delete locally restored copy of object or object version
|
||||
// from the source, while leaving metadata behind. The data on
|
||||
// transitioned tier lies untouched and still accessible
|
||||
opts.TransitionStatus = lcOpts.TransitionStatus
|
||||
_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
|
||||
return err
|
||||
case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
|
||||
// When an object is past expiry, delete the data from transitioned tier and
|
||||
// metadata from source
|
||||
if err := tgt.RemoveObject(context.Background(), arn.Bucket, object, miniogo.RemoveObjectOptions{VersionID: lcOpts.VersionID}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
|
||||
if isDeleteTierOnly {
|
||||
return nil
|
||||
}
|
||||
_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eventName := event.ObjectRemovedDelete
|
||||
if lcOpts.DeleteMarker {
|
||||
eventName = event.ObjectRemovedDeleteMarkerCreated
|
||||
}
|
||||
objInfo := ObjectInfo{
|
||||
Name: object,
|
||||
VersionID: lcOpts.VersionID,
|
||||
DeleteMarker: lcOpts.DeleteMarker,
|
||||
}
|
||||
// Notify object deleted event.
|
||||
sendEvent(eventArgs{
|
||||
EventName: eventName,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
Host: "Internal: [ILM-EXPIRY]",
|
||||
})
|
||||
}
|
||||
|
||||
// should never reach here
|
||||
return nil
|
||||
}
|
||||
|
||||
// transition object to target specified by the transition ARN. When an object is transitioned to another
|
||||
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
|
||||
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
|
||||
// to the transition tier without decrypting or re-encrypting.
|
||||
func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo ObjectInfo) error {
|
||||
lc, err := globalLifecycleSys.Get(objInfo.Bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lcOpts := lifecycle.ObjectOpts{
|
||||
Name: objInfo.Name,
|
||||
UserTags: objInfo.UserTags,
|
||||
}
|
||||
arn := getLifecycleTransitionTargetArn(ctx, lc, objInfo.Bucket, lcOpts)
|
||||
if arn == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
|
||||
if tgt == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
|
||||
gr, err := objectAPI.GetObjectNInfo(ctx, objInfo.Bucket, objInfo.Name, nil, http.Header{}, readLock, ObjectOptions{
|
||||
VersionID: objInfo.VersionID,
|
||||
TransitionStatus: lifecycle.TransitionPending,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oi := gr.ObjInfo
|
||||
|
||||
if oi.TransitionStatus == lifecycle.TransitionComplete {
|
||||
return nil
|
||||
}
|
||||
|
||||
putOpts := putTransitionOpts(oi)
|
||||
if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, "", "", putOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
gr.Close()
|
||||
|
||||
var opts ObjectOptions
|
||||
opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
|
||||
opts.VersionID = oi.VersionID
|
||||
opts.TransitionStatus = lifecycle.TransitionComplete
|
||||
eventName := event.ObjectTransitionComplete
|
||||
|
||||
_, err = objectAPI.DeleteObject(ctx, oi.Bucket, oi.Name, opts)
|
||||
if err != nil {
|
||||
eventName = event.ObjectTransitionFailed
|
||||
}
|
||||
// Notify object deleted event.
|
||||
sendEvent(eventArgs{
|
||||
EventName: eventName,
|
||||
BucketName: oi.Bucket,
|
||||
Object: ObjectInfo{
|
||||
Name: oi.Name,
|
||||
VersionID: opts.VersionID,
|
||||
},
|
||||
Host: "Internal: [ILM-Transition]",
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// getLifecycleTransitionTargetArn returns transition ARN for storage class specified in the config.
|
||||
func getLifecycleTransitionTargetArn(ctx context.Context, lc *lifecycle.Lifecycle, bucket string, obj lifecycle.ObjectOpts) *madmin.ARN {
|
||||
for _, rule := range lc.FilterActionableRules(obj) {
|
||||
if rule.Transition.StorageClass != "" {
|
||||
return globalBucketTargetSys.GetRemoteArnWithLabel(ctx, bucket, rule.Transition.StorageClass)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getTransitionedObjectReader returns a reader from the transitioned tier.
|
||||
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
var lc *lifecycle.Lifecycle
|
||||
lc, err = globalLifecycleSys.Get(bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lifecycle.ObjectOpts{
|
||||
Name: object,
|
||||
UserTags: oi.UserTags,
|
||||
ModTime: oi.ModTime,
|
||||
VersionID: oi.VersionID,
|
||||
DeleteMarker: oi.DeleteMarker,
|
||||
IsLatest: oi.IsLatest,
|
||||
})
|
||||
if arn == nil {
|
||||
return nil, fmt.Errorf("remote target not configured")
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
|
||||
if tgt == nil {
|
||||
return nil, fmt.Errorf("remote target not configured")
|
||||
}
|
||||
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
|
||||
if err != nil {
|
||||
return nil, ErrorRespToObjectError(err, bucket, object)
|
||||
}
|
||||
gopts := miniogo.GetObjectOptions{VersionID: opts.VersionID}
|
||||
|
||||
// get correct offsets for encrypted object
|
||||
if off >= 0 && length >= 0 {
|
||||
if err := gopts.SetRange(off, off+length-1); err != nil {
|
||||
return nil, ErrorRespToObjectError(err, bucket, object)
|
||||
}
|
||||
}
|
||||
|
||||
reader, _, _, err := tgt.GetObject(ctx, arn.Bucket, object, gopts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closeReader := func() { reader.Close() }
|
||||
|
||||
return fn(reader, h, opts.CheckPrecondFn, closeReader)
|
||||
}
|
||||
|
||||
// RestoreRequestType represents type of restore.
|
||||
type RestoreRequestType string
|
||||
|
||||
const (
|
||||
// SelectRestoreRequest specifies select request. This is the only valid value
|
||||
SelectRestoreRequest RestoreRequestType = "SELECT"
|
||||
)
|
||||
|
||||
// Encryption specifies encryption setting on restored bucket
|
||||
type Encryption struct {
|
||||
EncryptionType sse.SSEAlgorithm `xml:"EncryptionType"`
|
||||
KMSContext string `xml:"KMSContext,omitempty"`
|
||||
KMSKeyID string `xml:"KMSKeyId,omitempty"`
|
||||
}
|
||||
|
||||
// MetadataEntry denotes name and value.
|
||||
type MetadataEntry struct {
|
||||
Name string `xml:"Name"`
|
||||
Value string `xml:"Value"`
|
||||
}
|
||||
|
||||
// S3Location specifies s3 location that receives result of a restore object request
|
||||
type S3Location struct {
|
||||
BucketName string `xml:"BucketName,omitempty"`
|
||||
Encryption Encryption `xml:"Encryption,omitempty"`
|
||||
Prefix string `xml:"Prefix,omitempty"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"`
|
||||
Tagging *tags.Tags `xml:"Tagging,omitempty"`
|
||||
UserMetadata []MetadataEntry `xml:"UserMetadata"`
|
||||
}
|
||||
|
||||
// OutputLocation specifies bucket where object needs to be restored
|
||||
type OutputLocation struct {
|
||||
S3 S3Location `xml:"S3,omitempty"`
|
||||
}
|
||||
|
||||
// IsEmpty returns true if output location not specified.
|
||||
func (o *OutputLocation) IsEmpty() bool {
|
||||
return o.S3.BucketName == ""
|
||||
}
|
||||
|
||||
// SelectParameters specifies sql select parameters
|
||||
type SelectParameters struct {
|
||||
s3select.S3Select
|
||||
}
|
||||
|
||||
// IsEmpty returns true if no select parameters set
|
||||
func (sp *SelectParameters) IsEmpty() bool {
|
||||
return sp == nil || sp.S3Select == s3select.S3Select{}
|
||||
}
|
||||
|
||||
var (
|
||||
selectParamsXMLName = "SelectParameters"
|
||||
)
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Essentially the same as S3Select barring the xml name.
|
||||
if start.Name.Local == selectParamsXMLName {
|
||||
start.Name = xml.Name{Space: "", Local: "SelectRequest"}
|
||||
}
|
||||
return sp.S3Select.UnmarshalXML(d, start)
|
||||
}
|
||||
|
||||
// RestoreObjectRequest - xml to restore a transitioned object
|
||||
type RestoreObjectRequest struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest" json:"-"`
|
||||
Days int `xml:"Days,omitempty"`
|
||||
Type RestoreRequestType `xml:"Type,omitempty"`
|
||||
Tier string `xml:"Tier,-"`
|
||||
Description string `xml:"Description,omitempty"`
|
||||
SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
|
||||
OutputLocation OutputLocation `xml:"OutputLocation,omitempty"`
|
||||
}
|
||||
|
||||
// Maximum 2MiB size per restore object request.
const maxRestoreObjectRequestSize = 2 << 20

// parseRestoreRequest parses RestoreObjectRequest from xml
func parseRestoreRequest(reader io.Reader) (*RestoreObjectRequest, error) {
	req := RestoreObjectRequest{}
	if err := xml.NewDecoder(io.LimitReader(reader, maxRestoreObjectRequestSize)).Decode(&req); err != nil {
		return nil, err
	}
	return &req, nil
}

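The request body is decoded straight into the struct, with io.LimitReader capping it at maxRestoreObjectRequestSize (2 << 20 = 2 MiB). A standalone sketch of the same decode against a minimal payload; the struct below mirrors only a couple of the fields and is illustrative rather than the server's RestoreObjectRequest type:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// restoreRequest mirrors a small subset of RestoreObjectRequest for illustration.
type restoreRequest struct {
	XMLName xml.Name `xml:"RestoreRequest"`
	Days    int      `xml:"Days,omitempty"`
	Type    string   `xml:"Type,omitempty"`
}

const maxSize = 2 << 20 // 2 MiB request body cap

func main() {
	body := strings.NewReader(`<RestoreRequest><Days>7</Days></RestoreRequest>`)
	var req restoreRequest
	if err := xml.NewDecoder(io.LimitReader(body, maxSize)).Decode(&req); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("restore for %d days, type=%q\n", req.Days, req.Type) // restore for 7 days, type=""
}
```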
// validate a RestoreObjectRequest as per AWS S3 spec https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
|
||||
func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer) error {
|
||||
if r.Type != SelectRestoreRequest && !r.SelectParameters.IsEmpty() {
|
||||
return fmt.Errorf("Select parameters can only be specified with SELECT request type")
|
||||
}
|
||||
if r.Type == SelectRestoreRequest && r.SelectParameters.IsEmpty() {
|
||||
return fmt.Errorf("SELECT restore request requires select parameters to be specified")
|
||||
}
|
||||
|
||||
if r.Type != SelectRestoreRequest && !r.OutputLocation.IsEmpty() {
|
||||
return fmt.Errorf("OutputLocation required only for SELECT request type")
|
||||
}
|
||||
if r.Type == SelectRestoreRequest && r.OutputLocation.IsEmpty() {
|
||||
return fmt.Errorf("OutputLocation required for SELECT requests")
|
||||
}
|
||||
|
||||
if r.Days != 0 && r.Type == SelectRestoreRequest {
|
||||
return fmt.Errorf("Days cannot be specified with SELECT restore request")
|
||||
}
|
||||
if r.Days == 0 && r.Type != SelectRestoreRequest {
|
||||
return fmt.Errorf("restoration days should be at least 1")
|
||||
}
|
||||
// Check if bucket exists.
|
||||
if !r.OutputLocation.IsEmpty() {
|
||||
if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.OutputLocation.S3.Prefix == "" {
|
||||
return fmt.Errorf("Prefix is a required parameter in OutputLocation")
|
||||
}
|
||||
if r.OutputLocation.S3.Encryption.EncryptionType != crypto.SSEAlgorithmAES256 {
|
||||
return NotImplemented{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// set ObjectOptions for PUT call to restore temporary copy of transitioned data
|
||||
func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo ObjectInfo) (putOpts ObjectOptions) {
|
||||
meta := make(map[string]string)
|
||||
sc := rreq.OutputLocation.S3.StorageClass
|
||||
if sc == "" {
|
||||
sc = objInfo.StorageClass
|
||||
}
|
||||
meta[strings.ToLower(xhttp.AmzStorageClass)] = sc
|
||||
|
||||
if rreq.Type == SelectRestoreRequest {
|
||||
for _, v := range rreq.OutputLocation.S3.UserMetadata {
|
||||
if !strings.HasPrefix("x-amz-meta", strings.ToLower(v.Name)) {
|
||||
meta["x-amz-meta-"+v.Name] = v.Value
|
||||
continue
|
||||
}
|
||||
meta[v.Name] = v.Value
|
||||
}
|
||||
meta[xhttp.AmzObjectTagging] = rreq.OutputLocation.S3.Tagging.String()
|
||||
if rreq.OutputLocation.S3.Encryption.EncryptionType != "" {
|
||||
meta[crypto.SSEHeader] = crypto.SSEAlgorithmAES256
|
||||
}
|
||||
return ObjectOptions{
|
||||
Versioned: globalBucketVersioningSys.Enabled(bucket),
|
||||
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
|
||||
UserDefined: meta,
|
||||
}
|
||||
}
|
||||
for k, v := range objInfo.UserDefined {
|
||||
meta[k] = v
|
||||
}
|
||||
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
|
||||
|
||||
return ObjectOptions{
|
||||
Versioned: globalBucketVersioningSys.Enabled(bucket),
|
||||
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
|
||||
UserDefined: meta,
|
||||
VersionID: objInfo.VersionID,
|
||||
MTime: objInfo.ModTime,
|
||||
Expires: objInfo.Expires,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
errRestoreHDRMissing = fmt.Errorf("x-amz-restore header not found")
|
||||
errRestoreHDRMalformed = fmt.Errorf("x-amz-restore header malformed")
|
||||
)
|
||||
|
||||
// parse x-amz-restore header from user metadata to get the status of ongoing request and expiry of restoration
// if any. This header value is of format: ongoing-request=true|false, expires=time
func parseRestoreHeaderFromMeta(meta map[string]string) (ongoing bool, expiry time.Time, err error) {
	restoreHdr, ok := meta[xhttp.AmzRestore]
	if !ok {
		return ongoing, expiry, errRestoreHDRMissing
	}
	rslc := strings.SplitN(restoreHdr, ",", 2)
	if len(rslc) != 2 {
		return ongoing, expiry, errRestoreHDRMalformed
	}
	rstatusSlc := strings.SplitN(rslc[0], "=", 2)
	if len(rstatusSlc) != 2 {
		return ongoing, expiry, errRestoreHDRMalformed
	}
	rExpSlc := strings.SplitN(rslc[1], "=", 2)
	if len(rExpSlc) != 2 {
		return ongoing, expiry, errRestoreHDRMalformed
	}

	expiry, err = time.Parse(http.TimeFormat, rExpSlc[1])
	if err != nil {
		return
	}
	return rstatusSlc[1] == "true", expiry, nil
}

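The value is split at most twice: the first comma separates the ongoing-request part from the expiry part (the timestamp itself contains a comma, so a plain Split would break it), and each part is then split on "=". A standalone walk-through of the same format on a sample value, without calling the function above:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	// Sample value in the documented format:
	//   ongoing-request=true|false, expires=<http.TimeFormat timestamp>
	hdr := "ongoing-request=false, expires=Fri, 21 Dec 2012 00:00:00 GMT"

	parts := strings.SplitN(hdr, ",", 2) // split only at the first comma
	status := strings.SplitN(parts[0], "=", 2)
	exp := strings.SplitN(parts[1], "=", 2) // "Fri, 21 Dec 2012 00:00:00 GMT" survives intact

	expiry, err := time.Parse(http.TimeFormat, exp[1])
	if err != nil {
		fmt.Println("malformed header:", err)
		return
	}
	fmt.Println("ongoing:", status[1] == "true") // ongoing: false
	fmt.Println("expires:", expiry)              // expires: 2012-12-21 00:00:00 +0000 UTC
}
```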
// restoreTransitionedObject is similar to PostObjectRestore from AWS GLACIER
|
||||
// storage class. When PostObjectRestore API is called, a temporary copy of the object
|
||||
// is restored locally to the bucket on source cluster until the restore expiry date.
|
||||
// The copy that was transitioned continues to reside in the transitioned tier.
|
||||
func restoreTransitionedObject(ctx context.Context, bucket, object string, objAPI ObjectLayer, objInfo ObjectInfo, rreq *RestoreObjectRequest, restoreExpiry time.Time) error {
|
||||
var rs *HTTPRangeSpec
|
||||
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, objInfo, ObjectOptions{
|
||||
VersionID: objInfo.VersionID})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gr.Close()
|
||||
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pReader := NewPutObjReader(hashReader, nil, nil)
|
||||
opts := putRestoreOpts(bucket, object, rreq, objInfo)
|
||||
opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
|
||||
if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,7 +18,6 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -27,6 +26,7 @@ import (
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/handlers"
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -113,10 +113,6 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
|
||||
return
|
||||
}
|
||||
|
||||
if proxyRequestByBucket(ctx, w, r, bucket) {
|
||||
return
|
||||
}
|
||||
|
||||
listObjectVersions := objectAPI.ListObjectVersions
|
||||
|
||||
// Inititate a list object versions operation based on the input params.
|
||||
@@ -139,7 +135,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
|
||||
// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
|
||||
// --------------------------
|
||||
// This implementation of the GET operation returns some or all (up to 10000)
|
||||
// of the objects in a bucket. You can use the request parame<ters as selection
|
||||
// of the objects in a bucket. You can use the request parameters as selection
|
||||
// criteria to return a subset of the objects in a bucket.
|
||||
//
|
||||
// NOTE: It is recommended that this API to be used for application development.
|
||||
@@ -179,13 +175,6 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
|
||||
return
|
||||
}
|
||||
|
||||
// Analyze continuation token and route the request accordingly
|
||||
var success bool
|
||||
token, success = proxyRequestByToken(ctx, w, r, token)
|
||||
if success {
|
||||
return
|
||||
}
|
||||
|
||||
listObjectsV2 := objectAPI.ListObjectsV2
|
||||
|
||||
// Inititate a list objects operation based on the input params.
|
||||
@@ -201,9 +190,6 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
|
||||
|
||||
	// The next continuation token has id@node_index format to optimize paginated listing
	nextContinuationToken := listObjectsV2Info.NextContinuationToken
	if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
		nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
	}

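Continuation tokens here carry the index of the node that produced the previous page, in the form `<token>@<node_index>`, so a follow-up request can be proxied back to that node. A standalone sketch of composing and splitting such a token; the helper names are illustrative and not MinIO's parseRequestToken:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encodeToken appends the local node index to an opaque continuation token.
func encodeToken(token string, nodeIndex int) string {
	if token == "" {
		return token
	}
	return fmt.Sprintf("%s@%d", token, nodeIndex)
}

// decodeToken splits "<token>@<node_index>" back apart; -1 means "no node hint".
func decodeToken(token string) (string, int) {
	i := strings.LastIndex(token, "@")
	if i < 0 {
		return token, -1
	}
	idx, err := strconv.Atoi(token[i+1:])
	if err != nil {
		return token, -1
	}
	return token[:i], idx
}

func main() {
	t := encodeToken("eyJvYmoiOiJwaG90by5qcGcifQ", 2)
	fmt.Println(t)              // eyJvYmoiOiJwaG90by5qcGcifQ@2
	fmt.Println(decodeToken(t)) // eyJvYmoiOiJwaG90by5qcGcifQ 2
}
```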
response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
|
||||
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
|
||||
@@ -256,13 +242,6 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
|
||||
return
|
||||
}
|
||||
|
||||
// Analyze continuation token and route the request accordingly
|
||||
var success bool
|
||||
token, success = proxyRequestByToken(ctx, w, r, token)
|
||||
if success {
|
||||
return
|
||||
}
|
||||
|
||||
listObjectsV2 := objectAPI.ListObjectsV2
|
||||
|
||||
// Inititate a list objects operation based on the input params.
|
||||
@@ -276,13 +255,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
|
||||
|
||||
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
|
||||
|
||||
// The next continuation token has id@node_index format to optimize paginated listing
|
||||
nextContinuationToken := listObjectsV2Info.NextContinuationToken
|
||||
if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
|
||||
nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
|
||||
}
|
||||
|
||||
response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
|
||||
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
|
||||
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
|
||||
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)
|
||||
|
||||
@@ -290,18 +263,6 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
|
||||
writeSuccessResponseXML(w, encodeResponse(response))
|
||||
}
|
||||
|
||||
func getLocalNodeIndex() int {
|
||||
if len(globalProxyEndpoints) == 0 {
|
||||
return -1
|
||||
}
|
||||
for i, ep := range globalProxyEndpoints {
|
||||
if ep.IsLocal {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func parseRequestToken(token string) (subToken string, nodeIndex int) {
|
||||
if token == "" {
|
||||
return token, -1
|
||||
@@ -340,8 +301,8 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
|
||||
return proxyRequest(ctx, w, r, ep)
|
||||
}
|
||||
|
||||
func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) (success bool) {
|
||||
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(bucket, len(globalProxyEndpoints)))
|
||||
func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
|
||||
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
|
||||
}
|
||||
|
||||
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
|
||||
@@ -382,7 +343,12 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
|
||||
return
|
||||
}
|
||||
|
||||
	if proxyRequestByBucket(ctx, w, r, bucket) {
	// Forward the request using Source IP or bucket
	forwardStr := handlers.GetSourceIPFromHeaders(r)
	if forwardStr == "" {
		forwardStr = bucket
	}
	if proxyRequestByStringHash(ctx, w, r, forwardStr) {
		return
	}

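With this change the V1 listing is proxied by hashing a stable string, the client's source IP when available, otherwise the bucket name, onto the set of proxy endpoints, so repeated pages from the same client land on the same node. crcHashMod itself is not shown in this diff; the sketch below uses a plausible CRC32-mod stand-in with illustrative names:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// pickEndpoint maps an arbitrary string onto one of n endpoints.
// This mirrors what a crcHashMod-style helper is expected to do;
// the exact MinIO implementation may differ.
func pickEndpoint(s string, n int) int {
	if n <= 0 {
		return -1
	}
	return int(crc32.ChecksumIEEE([]byte(s)) % uint32(n))
}

func main() {
	endpoints := []string{"node-0:9000", "node-1:9000", "node-2:9000"}

	// Prefer the source IP; fall back to the bucket name, as the handler does.
	forwardStr := "203.0.113.7"
	if forwardStr == "" {
		forwardStr = "my-bucket"
	}
	idx := pickEndpoint(forwardStr, len(endpoints))
	fmt.Printf("%q -> %s\n", forwardStr, endpoints[idx])
}
```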
@@ -49,6 +49,7 @@ func (sys *BucketMetadataSys) Remove(bucket string) {
|
||||
}
|
||||
sys.Lock()
|
||||
delete(sys.metadataMap, bucket)
|
||||
globalBucketMonitor.DeleteBucket(bucket)
|
||||
sys.Unlock()
|
||||
}
|
||||
|
||||
@@ -72,7 +73,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
|
||||
// Update update bucket metadata for the specified config file.
|
||||
// The configData data should not be modified after being sent here.
|
||||
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
@@ -167,9 +168,6 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
|
||||
}
|
||||
meta.ReplicationConfigXML = configData
|
||||
case bucketTargetsFile:
|
||||
if !globalIsErasure && !globalIsDistErasure {
|
||||
return NotImplemented{}
|
||||
}
|
||||
meta.BucketTargetsConfigJSON = configData
|
||||
default:
|
||||
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
|
||||
@@ -275,7 +273,7 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
|
||||
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
|
||||
if globalIsGateway && globalGatewayName == NASBackendGateway {
|
||||
// Only needed in case of NAS gateway.
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
@@ -313,7 +311,7 @@ func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEC
|
||||
// The returned object may not be modified.
|
||||
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
@@ -373,10 +371,24 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
|
||||
return meta.bucketTargetConfig, nil
|
||||
}
|
||||
|
||||
// GetBucketTarget returns the target for the bucket and arn.
|
||||
func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin.BucketTarget, error) {
|
||||
targets, err := sys.GetBucketTargetsConfig(bucket)
|
||||
if err != nil {
|
||||
return madmin.BucketTarget{}, err
|
||||
}
|
||||
for _, t := range targets.Targets {
|
||||
if t.Arn == arn {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
return madmin.BucketTarget{}, errConfigNotFound
|
||||
}
|
||||
|
||||
// GetConfig returns a specific configuration from the bucket metadata.
|
||||
// The returned object may not be modified.
|
||||
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return newBucketMetadata(bucket), errServerNotInitialized
|
||||
}
|
||||
@@ -418,12 +430,12 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
|
||||
}
|
||||
|
||||
// Load bucket metadata sys in background
|
||||
go logger.LogIf(ctx, sys.load(ctx, buckets, objAPI))
|
||||
go sys.load(ctx, buckets, objAPI)
|
||||
return nil
|
||||
}
|
||||
|
||||
// concurrently load bucket metadata to speed up loading bucket metadata.
|
||||
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
|
||||
g := errgroup.WithNErrs(len(buckets))
|
||||
for index := range buckets {
|
||||
index := index
|
||||
@@ -440,22 +452,20 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
}
|
||||
for _, err := range g.Wait() {
|
||||
if err != nil {
|
||||
return err
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loads bucket metadata for all buckets into BucketMetadataSys.
|
||||
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
|
||||
count := 100 // load 100 bucket metadata at a time.
|
||||
for {
|
||||
if len(buckets) < count {
|
||||
return sys.concurrentLoad(ctx, buckets, objAPI)
|
||||
}
|
||||
if err := sys.concurrentLoad(ctx, buckets[:count], objAPI); err != nil {
|
||||
return err
|
||||
sys.concurrentLoad(ctx, buckets, objAPI)
|
||||
return
|
||||
}
|
||||
sys.concurrentLoad(ctx, buckets[:count], objAPI)
|
||||
buckets = buckets[count:]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,6 +107,10 @@ func newBucketMetadata(name string) BucketMetadata {
|
||||
// Load - loads the metadata of bucket by name from ObjectLayer api.
|
||||
// If an error is returned the returned metadata will be default initialized.
|
||||
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
|
||||
if name == "" {
|
||||
logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
|
||||
return errors.New("bucket name cannot be empty")
|
||||
}
|
||||
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
|
||||
data, err := readConfig(ctx, api, configFile)
|
||||
if err != nil {
|
||||
@@ -128,20 +132,22 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
|
||||
}
|
||||
// OK, parse data.
|
||||
_, err = b.UnmarshalMsg(data[4:])
|
||||
b.Name = name // in-case parsing failed for some reason, make sure bucket name is not empty.
|
||||
return err
|
||||
}
|
||||
|
||||
// loadBucketMetadata loads and migrates to bucket metadata.
|
||||
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
|
||||
b := newBucketMetadata(bucket)
|
||||
err := b.Load(ctx, objectAPI, bucket)
|
||||
err := b.Load(ctx, objectAPI, b.Name)
|
||||
if err == nil {
|
||||
return b, b.convertLegacyConfigs(ctx, objectAPI)
|
||||
}
|
||||
|
||||
if err != errConfigNotFound {
|
||||
if !errors.Is(err, errConfigNotFound) {
|
||||
return b, err
|
||||
}
|
||||
|
||||
// Old bucket without bucket metadata. Hence we migrate existing settings.
|
||||
return b, b.convertLegacyConfigs(ctx, objectAPI)
|
||||
}
|
||||
|
||||
@@ -21,10 +21,12 @@ import (
|
||||
"math"
|
||||
"net/http"
|
||||
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
)
|
||||
|
||||
// BucketObjectLockSys - map of bucket and retention configuration.
|
||||
@@ -33,7 +35,7 @@ type BucketObjectLockSys struct{}
|
||||
// Get - Get retention configuration.
|
||||
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return r, errServerNotInitialized
|
||||
}
|
||||
@@ -80,24 +82,22 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
|
||||
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
|
||||
// governance bypass headers are set and user has governance bypass permissions.
|
||||
// Objects in "Compliance" mode can be overwritten only if retention date is past.
|
||||
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
|
||||
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, oi ObjectInfo, gerr error) APIErrorCode {
|
||||
opts, err := getOpts(ctx, r, bucket, object.ObjectName)
|
||||
if err != nil {
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
|
||||
opts.VersionID = object.VersionID
|
||||
|
||||
oi, err := getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
if gerr != nil { // error from GetObjectInfo
|
||||
switch gerr.(type) {
|
||||
case MethodNotAllowed: // This happens usually for a delete marker
|
||||
if oi.DeleteMarker {
|
||||
// Delete marker should be present and valid.
|
||||
return ErrNone
|
||||
}
|
||||
}
|
||||
return toAPIErrorCode(ctx, err)
|
||||
return toAPIErrorCode(ctx, gerr)
|
||||
}
|
||||
|
||||
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
|
||||
@@ -245,13 +245,13 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,
|
||||
// For objects in "Compliance" mode, retention date cannot be shortened, and mode cannot be altered.
|
||||
// For objects with legal hold header set, the s3:PutObjectLegalHold permission is expected to be set
|
||||
// Both legal hold and retention can be applied independently on an object
|
||||
func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
var mode objectlock.RetMode
var retainDate objectlock.RetentionDate
var legalHold objectlock.ObjectLegalHold

retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)
retentionRequested := objectlock.IsObjectLockRetentionRequested(rq.Header)
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(rq.Header)

retentionCfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
@@ -267,25 +267,24 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
return mode, retainDate, legalHold, ErrNone
}

opts, err := getOpts(ctx, r, bucket, object)
opts, err := getOpts(ctx, rq, bucket, object)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}

if opts.VersionID != "" {
replica := rq.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String()

if opts.VersionID != "" && !replica {
if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)

t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked
}

if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}

mode = r.Mode
retainDate = r.RetainUntilDate
legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
@@ -298,17 +297,17 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj

if legalHoldRequested {
var lerr error
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(r.Header); lerr != nil {
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
}

if retentionRequested {
legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(r.Header)
legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(rq.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(r.Header)
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
@@ -317,7 +316,9 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
}
return rMode, rDate, legalHold, ErrNone
}

if replica { // replica inherits retention metadata only from source
return "", objectlock.RetentionDate{}, legalHold, ErrNone
}
if !retentionRequested && retentionCfg.Validity > 0 {
if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr
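The hunk above keeps the existing COMPLIANCE-mode guard while adding the replica bypass. A minimal sketch of that guard as a standalone predicate is shown below; the helper name is hypothetical, while the `objectlock` identifiers are the ones used in the diff.

```go
// complianceLocked reports whether an existing COMPLIANCE retention that has
// not yet expired should block an overwrite, mirroring the check above.
func complianceLocked(mode objectlock.RetMode, retainUntil, now time.Time) bool {
	return mode == objectlock.RetCompliance && retainUntil.After(now)
}
```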
@@ -63,7 +63,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}

func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
@@ -71,6 +71,8 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 1 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})
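The quota check above wires an expensive backend lookup into a small cache: the loader is configured once and its result is reused for the TTL window. A generic sketch of that pattern (hypothetical type, assuming only the standard `sync` and `time` packages) looks like this:

```go
// cachedLoader serves the last result of an expensive update function until
// it is older than ttl; a rough stand-in for the bucketStorageCache above.
type cachedLoader struct {
	mu     sync.Mutex
	ttl    time.Duration
	last   time.Time
	value  interface{}
	update func() (interface{}, error)
}

func (c *cachedLoader) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value != nil && time.Since(c.last) < c.ttl {
		return c.value, nil // still fresh, skip the backend call
	}
	v, err := c.update()
	if err != nil {
		return nil, err
	}
	c.value, c.last = v, time.Now()
	return v, nil
}
```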
@@ -18,25 +18,30 @@ package cmd

import (
"context"
"fmt"
"net/http"
"runtime"
"strings"
"time"

minio "github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/bandwidth"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)

// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -50,12 +55,19 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati
// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.RoleArn)
arn, err := madmin.ParseARN(rCfg.RoleArn)
if err != nil {
return false, BucketRemoteArnInvalid{}
}
if arn.Type != madmin.ReplicationService {
return false, BucketRemoteArnTypeInvalid{}
}
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rCfg.RoleArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
return false, BucketRemoteDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
@@ -80,19 +92,19 @@ func mustReplicateWeb(ctx context.Context, r *http.Request, bucket, object strin
if permErr != ErrNone {
return false
}
return mustReplicater(ctx, r, bucket, object, meta, replStatus)
return mustReplicater(ctx, bucket, object, meta, replStatus)
}

// mustReplicate returns true if object meets replication criteria.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
if s3Err := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, "", r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
return mustReplicater(ctx, r, bucket, object, meta, replStatus)
return mustReplicater(ctx, bucket, object, meta, replStatus)
}

// mustReplicater returns true if object meets replication criteria.
func mustReplicater(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
func mustReplicater(ctx context.Context, bucket, object string, meta map[string]string, replStatus string) bool {
if globalIsGateway {
return false
}
@@ -117,7 +129,171 @@ func mustReplicater(ctx context.Context, r *http.Request, bucket, object string,
return cfg.Replicate(opts)
}

func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
// returns true if any of the objects being deleted qualifies for replication.
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
c, err := getReplicationConfig(ctx, bucket)
if err != nil || c == nil {
return false
}
for _, obj := range objects {
if c.HasActiveRules(obj.ObjectName, true) {
return true
}
}
return false
}

// returns whether object version is a deletemarker and if object qualifies for replication
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate bool) {
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
return false, false
}
// when incoming delete is removal of a delete marker( a.k.a versioned delete),
// GetObjectInfo returns extra information even though it returns errFileNotFound
if gerr != nil {
validReplStatus := false
switch oi.ReplicationStatus {
case replication.Pending, replication.Complete, replication.Failed:
validReplStatus = true
}
if oi.DeleteMarker && validReplStatus {
return oi.DeleteMarker, true
}
return oi.DeleteMarker, false
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
DeleteMarker: true,
VersionID: dobj.VersionID,
}
return oi.DeleteMarker, rcfg.Replicate(opts)
}
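A hypothetical caller-side sketch of how these two new helpers fit together in a delete path (this is not the handler code from the PR, only an illustration using the functions defined above):

```go
// shouldReplicateDelete short-circuits when no rule can match, then asks
// checkReplicateDelete for the first candidate version.
func shouldReplicateDelete(ctx context.Context, bucket string, objects []ObjectToDelete, oi ObjectInfo, gerr error) bool {
	if len(objects) == 0 || !hasReplicationRules(ctx, bucket, objects) {
		return false // no replication rules cover these objects
	}
	_, replicate := checkReplicateDelete(ctx, bucket, objects[0], oi, gerr)
	return replicate
}
```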
// replicate deletes to the designated replication target if replication configuration
// has delete marker replication or delete replication (MinIO extension to allow deletes where version id
// is specified) enabled.
// Similar to bucket replication for PUT operation, soft delete (a.k.a setting delete marker) and
// permanent deletes (by specifying a version ID in the delete operation) have three states "Pending", "Complete"
// and "Failed" to mark the status of the replication of "DELETE" operation. All failed operations can
// then be retried by healing. In the case of permanent deletes, until the replication is completed on the
// target cluster, the object version is marked deleted on the source and hidden from listing. It is permanently
// deleted from the source when the VersionPurgeStatus changes to "Complete", i.e after replication succeeds
// on target.
func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) {
bucket := dobj.Bucket
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
if tgt == nil {
return
}
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{
ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
ReplicationMTime: dobj.DeleteMarkerMTime.Time,
ReplicationStatus: miniogo.ReplicationStatusReplica,
},
})

replicationStatus := dobj.DeleteMarkerReplicationStatus
versionPurgeStatus := dobj.VersionPurgeStatus

if rmErr != nil {
if dobj.VersionID == "" {
replicationStatus = string(replication.Failed)
} else {
versionPurgeStatus = Failed
}
} else {
if dobj.VersionID == "" {
replicationStatus = string(replication.Complete)
} else {
versionPurgeStatus = Complete
}
}
var eventName = event.ObjectReplicationComplete
if replicationStatus == string(replication.Failed) || versionPurgeStatus == Failed {
eventName = event.ObjectReplicationFailed
}
objInfo := ObjectInfo{
Name: dobj.ObjectName,
DeleteMarker: dobj.DeleteMarker,
VersionID: versionID,
ReplicationStatus: replication.StatusType(dobj.DeleteMarkerReplicationStatus),
VersionPurgeStatus: versionPurgeStatus,
}

eventArg := &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
EventName: eventName,
}
sendEvent(*eventArg)

// Update metadata on the delete marker or purge permanent delete if replication success.
if _, err = objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
DeleteMarkerReplicationStatus: replicationStatus,
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionPurgeStatus: versionPurgeStatus,
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s %s: %w", bucket, dobj.ObjectName, dobj.VersionID, err))
}
}
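The function above collapses the two bookkeeping paths (delete-marker replication status vs. version purge status) into a single event name. A small hedged sketch of that choice, assuming `event.Name` is the type behind the event constants used in the diff:

```go
// replicationEventName mirrors the notification choice in replicateDelete:
// a failure on either path surfaces as ObjectReplicationFailed.
// Hypothetical helper, not part of this PR.
func replicationEventName(replStatus string, purgeFailed bool) event.Name {
	if replStatus == string(replication.Failed) || purgeFailed {
		return event.ObjectReplicationFailed
	}
	return event.ObjectReplicationComplete
}
```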
func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string {
meta := make(map[string]string, len(oi.UserDefined))
for k, v := range oi.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
continue
}
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
meta[k] = v
}
if oi.ContentEncoding != "" {
meta[xhttp.ContentEncoding] = oi.ContentEncoding
}
if oi.ContentType != "" {
meta[xhttp.ContentType] = oi.ContentType
}
tag, err := tags.ParseObjectTags(oi.UserTags)
if err != nil {
return nil
}
if tag != nil {
meta[xhttp.AmzObjectTagging] = tag.String()
meta[xhttp.AmzTagDirective] = "REPLACE"
}
sc := dest.StorageClass
if sc == "" {
sc = oi.StorageClass
}
meta[xhttp.AmzStorageClass] = sc
if oi.UserTags != "" {
meta[xhttp.AmzObjectTagging] = oi.UserTags
}
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339)
meta[xhttp.MinIOSourceETag] = oi.ETag
meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
return meta
}

func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
@@ -137,15 +313,17 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp
sc = objInfo.StorageClass
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
ReplicationVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationMTime: objInfo.ModTime,
ReplicationETag: objInfo.ETag,
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
},
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
rmode := miniogo.RetentionMode(mode)
@@ -164,75 +342,163 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}

return
}

type replicationAction string

const (
replicateMetadata replicationAction = "metadata"
replicateNone replicationAction = "none"
replicateAll replicationAction = "all"
)

// returns replicationAction by comparing metadata between source and target
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
// needs full replication
if oi1.ETag != oi2.ETag ||
oi1.VersionID != oi2.VersionID ||
oi1.Size != oi2.Size ||
oi1.DeleteMarker != oi2.IsDeleteMarker {
return replicateAll
}

if !oi1.ModTime.Equal(oi2.LastModified) ||
oi1.ContentType != oi2.ContentType ||
oi1.StorageClass != oi2.StorageClass {
return replicateMetadata
}
if oi1.ContentEncoding != "" {
enc, ok := oi2.UserMetadata[xhttp.ContentEncoding]
if !ok || enc != oi1.ContentEncoding {
return replicateMetadata
}
}
for k, v := range oi2.UserMetadata {
oi2.Metadata[k] = []string{v}
}
if len(oi2.Metadata) != len(oi1.UserDefined) {
return replicateMetadata
}
for k1, v1 := range oi1.UserDefined {
if v2, ok := oi2.Metadata[k1]; !ok || v1 != strings.Join(v2, "") {
return replicateMetadata
}
}
t, _ := tags.MapToObjectTags(oi2.UserTags)
if t.String() != oi1.UserTags {
return replicateMetadata
}
return replicateNone
}
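How the three actions are consumed is shown further down in `replicateObject`; as a compact, hedged summary (illustrative helper, not the PR's exact control flow):

```go
// describeReplicationAction maps the comparison result onto the operation the
// replication path issues against the remote target.
func describeReplicationAction(src ObjectInfo, tgt minio.ObjectInfo) string {
	switch getReplicationAction(src, tgt) {
	case replicateNone:
		return "skip: target already holds this version"
	case replicateMetadata:
		return "CopyObject on the target using getCopyObjMetadata headers"
	default: // replicateAll
		return "PutObject of the full payload using putReplicationOpts"
	}
}
```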
// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
bucket := objInfo.Bucket
object := objInfo.Name

cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.RoleArn)
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
})
if err != nil {
return
}
defer gr.Close()
objInfo := gr.ObjInfo
objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
gr.Close()
return
}

dest := cfg.GetDestination()
if dest.Bucket == "" {
gr.Close()
return
}
// In the rare event that replication is in pending state either due to
// server shut down/crash before replication completed or healing and PutObject
// race - do an additional stat to see if the version ID exists
if healPending {
_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {

rtype := replicateAll
oi, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {
rtype = getReplicationAction(objInfo, oi)
if rtype == replicateNone {
gr.Close()
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return
}
}
putOpts := putReplicationOpts(dest, objInfo)

target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
return
}
putOpts := putReplicationOpts(ctx, dest, objInfo)
replicationStatus := replication.Complete
_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)

// Setup bandwidth throttling
peers, _ := globalEndpoints.peers()
totalNodesCount := len(peers)
if totalNodesCount == 0 {
totalNodesCount = 1 // For standalone erasure coding
}
b := target.BandwidthLimit / int64(totalNodesCount)
var headerSize int
for k, v := range putOpts.Header() {
headerSize += len(k) + len(v)
}
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
if rtype == replicateAll {
_, err = tgt.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts)
} else {
// replicate metadata for object tagging/copy with metadata replacement
dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
_, err = tgt.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), dstOpts)
}

r.Close()
if err != nil {
replicationStatus = replication.Failed
// Notify replication failure event.
if eventArg == nil {
eventArg = &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
}
}
eventArg.EventName = event.OperationReplicationFailed
eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
sendEvent(*eventArg)
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}

// FIXME: add support for missing replication events
// - event.ObjectReplicationNotTracked
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
var eventName = event.ObjectReplicationComplete
if replicationStatus == replication.Failed {
eventName = event.ObjectReplicationFailed
}
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
objInfo.metadataOnly = true // Perform only metadata updates.
if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
logger.LogIf(ctx, err)
}, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
}
}
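The bandwidth setup above divides the configured per-target limit evenly across the nodes of the deployment before wrapping the reader. The arithmetic in isolation, with hypothetical numbers for illustration:

```go
// perNodeBandwidth splits a per-target limit across nodes, falling back to a
// single node for standalone erasure-coded setups, as the code above does.
func perNodeBandwidth(targetLimit int64, nodeCount int) int64 {
	if nodeCount == 0 {
		nodeCount = 1 // standalone erasure coding
	}
	return targetLimit / int64(nodeCount)
}

// e.g. perNodeBandwidth(100<<20, 4) == 25<<20: a 100 MiB/s target limit
// becomes 25 MiB/s per node on a 4-node cluster.
```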
@@ -258,3 +524,95 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
delKey(xhttp.AmzBucketReplicationStatus)
return dst
}

// DeletedObjectVersionInfo has info on deleted object
type DeletedObjectVersionInfo struct {
DeletedObject
Bucket string
}
type replicationState struct {
// add future metrics here
replicaCh chan ObjectInfo
replicaDeleteCh chan DeletedObjectVersionInfo
}

func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
if r == nil {
return
}
select {
case r.replicaCh <- oi:
default:
}
}

func (r *replicationState) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
if r == nil {
return
}
select {
case r.replicaDeleteCh <- doi:
default:
}
}

var (
globalReplicationState *replicationState
// TODO: currently keeping it conservative
// but eventually can be tuned in future,
// take only half the CPUs for replication
// conservatively.
globalReplicationConcurrent = runtime.GOMAXPROCS(0) / 2
)

func newReplicationState() *replicationState {

// fix minimum concurrent replication to 1 for single CPU setup
if globalReplicationConcurrent == 0 {
globalReplicationConcurrent = 1
}
rs := &replicationState{
replicaCh: make(chan ObjectInfo, 10000),
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
}
go func() {
<-GlobalContext.Done()
close(rs.replicaCh)
close(rs.replicaDeleteCh)
}()
return rs
}

// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
// Add a new worker.
go func() {
for {
select {
case <-ctx.Done():
return
case oi, ok := <-r.replicaCh:
if !ok {
return
}
replicateObject(ctx, oi, objectAPI)
case doi, ok := <-r.replicaDeleteCh:
if !ok {
return
}
replicateDelete(ctx, doi, objectAPI)
}
}
}()
}

func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
if globalReplicationState == nil {
return
}

// Start with globalReplicationConcurrent.
for i := 0; i < globalReplicationConcurrent; i++ {
globalReplicationState.addWorker(ctx, objectAPI)
}
}
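The replication queue above is a buffered channel with a non-blocking send (tasks are dropped when the queue is full) drained by a fixed number of workers. A generic, self-contained sketch of the same pattern, with illustrative names rather than the server's API:

```go
// taskQueue is a bounded queue drained by a fixed worker pool.
type taskQueue struct {
	ch chan func()
}

func newTaskQueue(ctx context.Context, depth, workers int) *taskQueue {
	q := &taskQueue{ch: make(chan func(), depth)}
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case <-ctx.Done():
					return
				case task, ok := <-q.ch:
					if !ok {
						return
					}
					task()
				}
			}
		}()
	}
	return q
}

// tryEnqueue drops the task instead of blocking the caller when the queue is full,
// mirroring the select/default used by queueReplicaTask above.
func (q *taskQueue) tryEnqueue(task func()) bool {
	select {
	case q.ch <- task:
		return true
	default:
		return false
	}
}
```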
@@ -18,14 +18,18 @@ package cmd

import (
"context"
"encoding/hex"
"net/http"
"strings"
"sync"
"time"

minio "github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/madmin"
sha256 "github.com/minio/sha256-simd"
)

// BucketTargetSys represents bucket targets subsystem
@@ -33,7 +37,6 @@ type BucketTargetSys struct {
sync.RWMutex
arnRemotesMap map[string]*miniogo.Core
targetsMap map[string][]madmin.BucketTarget
clientsCache map[string]*miniogo.Core
}

// ListTargets lists bucket targets across tenant or for individual bucket, and returns
@@ -75,59 +78,84 @@ func (sys *BucketTargetSys) ListBucketTargets(ctx context.Context, bucket string
}

// SetTarget - sets a new minio-go client target for this bucket.
func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *madmin.BucketTarget) error {
func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *madmin.BucketTarget, update bool) error {
if globalIsGateway {
return nil
}
if !tgt.Type.IsValid() {
if !tgt.Type.IsValid() && !update {
return BucketRemoteArnTypeInvalid{Bucket: bucket}
}
clnt, err := sys.getRemoteTargetClient(tgt)
if err != nil {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
// validate if target credentials are ok
if _, err = clnt.BucketExists(ctx, tgt.TargetBucket); err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if tgt.Type == madmin.ReplicationService {
if !globalIsErasure {
return NotImplemented{}
}
if !globalBucketVersioningSys.Enabled(bucket) {
return BucketReplicationSourceNotVersioned{Bucket: bucket}
}
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
if err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketReplicationTargetNotVersioned{Bucket: tgt.TargetBucket}
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
}
}
if tgt.Type == madmin.ILMService {
if globalBucketVersioningSys.Enabled(bucket) {
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
if err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
}
}
}

sys.Lock()
defer sys.Unlock()

tgts := sys.targetsMap[bucket]
newtgts := make([]madmin.BucketTarget, len(tgts))
labels := make(map[string]struct{})
found := false
for idx, t := range tgts {
labels[t.Label] = struct{}{}
if t.Type == tgt.Type {
if t.Arn == tgt.Arn {
if t.Arn == tgt.Arn && !update {
return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
}
if t.Label == tgt.Label && !update {
return BucketRemoteLabelInUse{Bucket: t.TargetBucket}
}
newtgts[idx] = *tgt
found = true
continue
}
newtgts[idx] = t
}
if !found {
if _, ok := labels[tgt.Label]; ok && !update {
return BucketRemoteLabelInUse{Bucket: tgt.TargetBucket}
}
if !found && !update {
newtgts = append(newtgts, *tgt)
}

sys.targetsMap[bucket] = newtgts
sys.arnRemotesMap[tgt.Arn] = clnt
if _, ok := sys.clientsCache[clnt.EndpointURL().String()]; !ok {
sys.clientsCache[clnt.EndpointURL().String()] = clnt
}
return nil
}
@@ -144,6 +172,9 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
return BucketRemoteArnInvalid{Bucket: bucket}
}
if arn.Type == madmin.ReplicationService {
if !globalIsErasure {
return NotImplemented{}
}
// reject removal of remote target if replication configuration is present
rcfg, err := getReplicationConfig(ctx, bucket)
if err == nil && rcfg.RoleArn == arnStr {
@@ -152,6 +183,16 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
}
}
}
if arn.Type == madmin.ILMService {
// reject removal of remote target if lifecycle transition uses this arn
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err == nil && transitionSCInUse(ctx, config, bucket, arnStr) {
if _, ok := sys.arnRemotesMap[arnStr]; ok {
return BucketRemoteRemoveDisallowed{Bucket: bucket}
}
}
}

// delete ARN type from list of matching targets
sys.Lock()
defer sys.Unlock()
@@ -173,19 +214,56 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
return nil
}

// GetReplicationTargetClient returns minio-go client for replication target instance
func (sys *BucketTargetSys) GetReplicationTargetClient(ctx context.Context, arn string) *miniogo.Core {
// GetRemoteTargetClient returns minio-go client for replication target instance
func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *miniogo.Core {
sys.RLock()
defer sys.RUnlock()
return sys.arnRemotesMap[arn]
}

// GetRemoteTargetWithLabel returns bucket target given a target label
func (sys *BucketTargetSys) GetRemoteTargetWithLabel(ctx context.Context, bucket, targetLabel string) *madmin.BucketTarget {
sys.RLock()
defer sys.RUnlock()
for _, t := range sys.targetsMap[bucket] {
if strings.ToUpper(t.Label) == strings.ToUpper(targetLabel) {
tgt := t.Clone()
return &tgt
}
}
return nil
}

// GetRemoteArnWithLabel returns bucket target's ARN given its target label
func (sys *BucketTargetSys) GetRemoteArnWithLabel(ctx context.Context, bucket, tgtLabel string) *madmin.ARN {
tgt := sys.GetRemoteTargetWithLabel(ctx, bucket, tgtLabel)
if tgt == nil {
return nil
}
arn, err := madmin.ParseARN(tgt.Arn)
if err != nil {
return nil
}
return arn
}

// GetRemoteLabelWithArn returns a bucket target's label given its ARN
func (sys *BucketTargetSys) GetRemoteLabelWithArn(ctx context.Context, bucket, arnStr string) string {
sys.RLock()
defer sys.RUnlock()
for _, t := range sys.targetsMap[bucket] {
if t.Arn == arnStr {
return t.Label
}
}
return ""
}

// NewBucketTargetSys - creates new replication system.
func NewBucketTargetSys() *BucketTargetSys {
return &BucketTargetSys{
arnRemotesMap: make(map[string]*miniogo.Core),
targetsMap: make(map[string][]madmin.BucketTarget),
clientsCache: make(map[string]*miniogo.Core),
}
}

@@ -205,14 +283,14 @@ func (sys *BucketTargetSys) Init(ctx context.Context, buckets []BucketInfo, objA
return nil
}

// UpdateTarget updates target to reflect metadata updates
func (sys *BucketTargetSys) UpdateTarget(bucket string, cfg *madmin.BucketTargets) {
// UpdateAllTargets updates target to reflect metadata updates
func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketTargets) {
if sys == nil {
return
}
sys.Lock()
defer sys.Unlock()
if cfg == nil || cfg.Empty() {
if tgts == nil || tgts.Empty() {
// remove target and arn association
if tgts, ok := sys.targetsMap[bucket]; ok {
for _, t := range tgts {
@@ -223,20 +301,17 @@ func (sys *BucketTargetSys) UpdateTarget(bucket string, cfg *madmin.BucketTarget
return
}

if len(cfg.Targets) > 0 {
sys.targetsMap[bucket] = cfg.Targets
if len(tgts.Targets) > 0 {
sys.targetsMap[bucket] = tgts.Targets
}
for _, tgt := range cfg.Targets {
for _, tgt := range tgts.Targets {
tgtClient, err := sys.getRemoteTargetClient(&tgt)
if err != nil {
continue
}
sys.arnRemotesMap[tgt.Arn] = tgtClient
if _, ok := sys.clientsCache[tgtClient.EndpointURL().String()]; !ok {
sys.clientsCache[tgtClient.EndpointURL().String()] = tgtClient
}
}
sys.targetsMap[bucket] = cfg.Targets
sys.targetsMap[bucket] = tgts.Targets
}

// create minio-go clients for buckets having remote targets
@@ -258,9 +333,6 @@ func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objA
continue
}
sys.arnRemotesMap[tgt.Arn] = tgtClient
if _, ok := sys.clientsCache[tgtClient.EndpointURL().String()]; !ok {
sys.clientsCache[tgtClient.EndpointURL().String()] = tgtClient
}
}
sys.targetsMap[bucket.Name] = cfg.Targets
}
@@ -272,16 +344,14 @@ var getRemoteTargetInstanceTransportOnce sync.Once

// Returns a minio-go Client configured to access remote host described in replication target config.
func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*miniogo.Core, error) {
if clnt, ok := sys.clientsCache[tcfg.Endpoint]; ok {
return clnt, nil
}
config := tcfg.Credentials
creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")

getRemoteTargetInstanceTransportOnce.Do(func() {
getRemoteTargetInstanceTransport = NewGatewayHTTPTransport()
getRemoteTargetInstanceTransport = newGatewayHTTPTransport(1 * time.Hour)
})
core, err := miniogo.NewCore(tcfg.Endpoint, &miniogo.Options{

core, err := miniogo.NewCore(tcfg.URL().Host, &miniogo.Options{
Creds: creds,
Secure: tcfg.Secure,
Transport: getRemoteTargetInstanceTransport,
@@ -296,18 +366,28 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
}
tgts := sys.targetsMap[bucket]
for _, tgt := range tgts {
if tgt.Type == target.Type && tgt.TargetBucket == target.TargetBucket && target.URL() == tgt.URL() {
if tgt.Type == target.Type && tgt.TargetBucket == target.TargetBucket && target.URL().String() == tgt.URL().String() {
return tgt.Arn
}
}
if !madmin.ServiceType(target.Type).IsValid() {
return ""
}
return generateARN(target)
}

// generate ARN that is unique to this target type
func generateARN(t *madmin.BucketTarget) string {
hash := sha256.New()
hash.Write([]byte(t.Type))
hash.Write([]byte(t.Region))
hash.Write([]byte(t.TargetBucket))
hashSum := hex.EncodeToString(hash.Sum(nil))
arn := madmin.ARN{
Type: target.Type,
ID: mustGetUUID(),
Region: target.Region,
Bucket: target.TargetBucket,
Type: t.Type,
ID: hashSum,
Region: t.Region,
Bucket: t.TargetBucket,
}
return arn.String()
}
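The change to `generateARN` above replaces a random UUID with a hash of the target's type, region and bucket, so re-registering the same remote target reproduces the same ARN. A standalone sketch of that ID derivation (using the standard-library sha256 here instead of the sha256-simd package imported in the diff):

```go
// remoteTargetID derives a deterministic identifier from the target's
// type, region and bucket, mirroring the hashing in generateARN.
func remoteTargetID(svcType, region, bucket string) string {
	h := sha256.New()
	h.Write([]byte(svcType))
	h.Write([]byte(region))
	h.Write([]byte(bucket))
	return hex.EncodeToString(h.Sum(nil))
}
```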
@@ -70,6 +70,14 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
}, r.URL, guessIsBrowserReq(r))
return
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{
Code: "InvalidBucketState",
Description: "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusConflict,
}, r.URL, guessIsBrowserReq(r))
return
}

configData, err := xml.Marshal(v)
if err != nil {
@@ -21,6 +21,7 @@ import (
"encoding/gob"
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
@@ -33,6 +34,7 @@ import (
"github.com/minio/cli"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/config"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/certs"
@@ -43,9 +45,8 @@ func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
rand.Seed(time.Now().UTC().UnixNano())
globalDNSCache = xhttp.NewDNSCache(3*time.Second, 10*time.Second)

gob.Register(StorageErr(""))
}
@@ -63,10 +64,12 @@ func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
}
}

globalCompressConfigMu.Lock()
if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() {
logger.Fatal(errInvalidArgument,
"Compression support is requested but '%s' does not support compression", name)
}
globalCompressConfigMu.Unlock()
}

// Check for updates and print a notification message
@@ -104,11 +107,7 @@ func checkUpdate(mode string) {
return
}

if globalInplaceUpdateDisabled {
logStartupMessage(updateMsg)
} else {
logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
}
logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
}

func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
@@ -338,14 +337,23 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
return nil, nil, false, err
}
for _, file := range files {
// We exclude any regular file and the "CAs/" directory.
// The "CAs/" directory contains (root) CA certificates
// that MinIO adds to its list of trusted roots (tls.Config.RootCAs).
// Therefore, "CAs/" does not contain X.509 certificates that
// are meant to be served by MinIO.
if !file.IsDir() || file.Name() == "CAs" {
// Ignore all
// - regular files
// - "CAs" directory
// - any directory which starts with ".."
if file.Mode().IsRegular() || file.Name() == "CAs" || strings.HasPrefix(file.Name(), "..") {
continue
}
if file.Mode()&os.ModeSymlink == os.ModeSymlink {
file, err = os.Stat(filepath.Join(root.Name(), file.Name()))
if err != nil {
// not accessible ignore
continue
}
if !file.IsDir() {
continue
}
}

var (
certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
@@ -354,8 +362,8 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
if !isFile(certFile) || !isFile(keyFile) {
continue
}
if err := manager.AddCertificate(certFile, keyFile); err != nil {
err = fmt.Errorf("Failed to load TLS certificate '%s': %v", certFile, err)
if err = manager.AddCertificate(certFile, keyFile); err != nil {
err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
logger.LogIf(GlobalContext, err, logger.Minio)
}
}
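The new directory filter above only considers directories (after resolving symlinks) that are not named "CAs" and do not start with "..", before looking for a `public.crt`/`private.key` pair inside them. Condensed into one hedged helper (names are illustrative, not the server's):

```go
// isCertDir mirrors the filter in getTLSConfig: skip regular files, the "CAs"
// directory and ".."-prefixed names, and resolve symlinks to directories.
func isCertDir(root string, fi os.FileInfo) bool {
	if fi.Mode().IsRegular() || fi.Name() == "CAs" || strings.HasPrefix(fi.Name(), "..") {
		return false
	}
	if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		resolved, err := os.Stat(filepath.Join(root, fi.Name()))
		if err != nil || !resolved.IsDir() {
			return false // broken or non-directory symlink
		}
	}
	return true
}
```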
@@ -17,6 +17,7 @@
package cmd

import (
"context"
"fmt"
"strings"
"sync"
@@ -25,8 +26,10 @@ import (
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/crawler"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/config/etcd"
"github.com/minio/minio/cmd/config/heal"
xldap "github.com/minio/minio/cmd/config/identity/ldap"
"github.com/minio/minio/cmd/config/identity/openid"
"github.com/minio/minio/cmd/config/notify"
@@ -55,6 +58,8 @@ func initHelp() {
config.KmsKesSubSys: crypto.DefaultKesKVS,
config.LoggerWebhookSubSys: logger.DefaultKVS,
config.AuditWebhookSubSys: logger.DefaultAuditKVS,
config.HealSubSys: heal.DefaultKVS,
config.CrawlerSubSys: crawler.DefaultKVS,
}
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v
@@ -106,6 +111,14 @@ func initHelp() {
Key: config.APISubSys,
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
Key: config.HealSubSys,
Description: "manage object healing frequency and bitrot verification checks",
},
config.HelpKV{
Key: config.CrawlerSubSys,
Description: "manage crawling for usage calculation, lifecycle, healing and more",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
Description: "send server logs to webhook endpoints",
@@ -185,6 +198,8 @@ func initHelp() {
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
config.HealSubSys: heal.Help,
config.CrawlerSubSys: crawler.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,
@@ -214,6 +229,9 @@ var (
)

func validateConfig(s config.Config, setDriveCount int) error {
// We must have a global lock for this so nobody else modifies env while we do.
defer env.LockSetEnv()()

// Disable merging env values with config for validation.
env.SetEnvOff()

@@ -242,7 +260,22 @@ func validateConfig(s config.Config, setDriveCount int) error {
return err
}

if _, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]); err != nil {
compCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
return err
}
objAPI := newObjectLayerFn()
if objAPI != nil {
if compCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
}
}

if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
return err
}

if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
return err
}

@@ -398,6 +431,11 @@ func lookupConfigs(s config.Config, setDriveCount int) {

globalAPIConfig.init(apiConfig, setDriveCount)

// Initialize remote instance transport once.
getRemoteInstanceTransportOnce.Do(func() {
getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
})

if globalIsErasure {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
if err != nil {
@@ -435,15 +473,10 @@ func lookupConfigs(s config.Config, setDriveCount int) {

// Enable auto-encryption if enabled
globalAutoEncryption = kmsCfg.AutoEncryption
if globalAutoEncryption {
if globalAutoEncryption && !globalIsGateway {
logger.LogIf(ctx, fmt.Errorf("%s env is deprecated please migrate to using `mc encrypt` at bucket level", crypto.EnvKMSAutoEncryption))
}

globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup Compression: %w", err))
}

globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
@@ -473,11 +506,13 @@ func lookupConfigs(s config.Config, setDriveCount int) {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
}

for _, l := range loggerCfg.HTTP {
for k, l := range loggerCfg.HTTP {
if l.Enabled {
// Enable http logging
if err = logger.AddTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.New(
http.WithTargetName(k),
http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
@@ -489,11 +524,13 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}

for _, l := range loggerCfg.Audit {
for k, l := range loggerCfg.Audit {
if l.Enabled {
// Enable http audit logging
if err = logger.AddAuditTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.New(
http.WithTargetName(k),
http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
@@ -514,6 +551,68 @@ func lookupConfigs(s config.Config, setDriveCount int) {
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}

// Apply dynamic config values
logger.LogIf(ctx, applyDynamicConfig(ctx, s))
}

// applyDynamicConfig will apply dynamic config values.
// Dynamic systems should be in config.SubSystemsDynamic as well.
func applyDynamicConfig(ctx context.Context, s config.Config) error {
// Read all dynamic configs.
// API
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}

// Compression
cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to setup Compression: %w", err)
}
objAPI := newObjectLayerFn()
if objAPI != nil {
if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
}
}

// Heal
healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to apply heal config: %w", err))
}

// Crawler
crawlerCfg, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply crawler config: %w", err)
}

// Apply configurations.
// We should not fail after this.
globalAPIConfig.init(apiConfig, globalAPIConfig.setDriveCount)

globalCompressConfigMu.Lock()
globalCompressConfig = cmpCfg
globalCompressConfigMu.Unlock()

globalHealConfigMu.Lock()
globalHealConfig = healCfg
globalHealConfigMu.Unlock()

logger.LogIf(ctx, crawlerSleeper.Update(crawlerCfg.Delay, crawlerCfg.MaxWait))

// Update all dynamic config values in memory.
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
if globalServerConfig != nil {
for k := range config.SubSystemsDynamic {
globalServerConfig[k] = s[k]
}
}
return nil
}

// Help - return sub-system level help
@@ -87,7 +87,7 @@ func mkdirAllIgnorePerm(path string) error {
if err != nil {
// It is possible in kubernetes like deployments this directory
// is already mounted and is not writable, ignore any write errors.
if os.IsPermission(err) {
if osIsPermission(err) {
err = nil
}
}
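The remaining hunks in this comparison repeat one mechanical substitution: every `os.IsNotExist`/`os.IsPermission` call moves to `osIsNotExist`/`osIsPermission` helpers. Their implementation is not shown in this diff; a plausible minimal version, using error unwrapping from the standard library, might look like the following (hypothetical sketch, not the PR's code):

```go
// osIsNotExist / osIsPermission: assumed thin wrappers over errors.Is so that
// wrapped errors are also recognized. The actual helpers may differ.
func osIsNotExist(err error) bool  { return errors.Is(err, os.ErrNotExist) }
func osIsPermission(err error) bool { return errors.Is(err, os.ErrPermission) }
```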
@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"time"
"unicode/utf8"
@@ -28,7 +27,7 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/madmin"
etcd "go.etcd.io/etcd/v3/clientv3"
etcd "go.etcd.io/etcd/clientv3"
)

func handleEncryptedConfigBackend(objAPI ObjectLayer) error {
@@ -194,7 +193,7 @@ func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client)
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
return fmt.Errorf("Decrypting config failed %w, possibly credentials are incorrect", err)
}

cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
@@ -274,7 +273,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
return fmt.Errorf("Decrypting config failed %w, possibly credentials are incorrect", err)
}

cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)

@@ -74,7 +74,7 @@ func migrateConfig() error {
// Load only config version information.
version, err := GetVersion(getConfigFile())
if err != nil {
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
}
return err
@@ -243,7 +243,7 @@ func purgeV1() error {

cv1 := &configV1{}
_, err := Load(configFile, cv1)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘1’. %w", err)
@@ -264,7 +264,7 @@ func migrateV2ToV3() error {

cv2 := &configV2{}
_, err := Load(configFile, cv2)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘2’. %w", err)
@@ -323,7 +323,7 @@ func migrateV3ToV4() error {

cv3 := &configV3{}
_, err := Load(configFile, cv3)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘3’. %w", err)
@@ -361,7 +361,7 @@ func migrateV4ToV5() error {

cv4 := &configV4{}
_, err := Load(configFile, cv4)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘4’. %w", err)
@@ -402,7 +402,7 @@ func migrateV5ToV6() error {

cv5 := &configV5{}
_, err := Load(configFile, cv5)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘5’. %w", err)
@@ -491,7 +491,7 @@ func migrateV6ToV7() error {

cv6 := &configV6{}
_, err := Load(configFile, cv6)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘6’. %w", err)
@@ -547,7 +547,7 @@ func migrateV7ToV8() error {

cv7 := &serverConfigV7{}
_, err := Load(configFile, cv7)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘7’. %w", err)
@@ -609,7 +609,7 @@ func migrateV8ToV9() error {

cv8 := &serverConfigV8{}
_, err := Load(configFile, cv8)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘8’. %w", err)
@@ -679,7 +679,7 @@ func migrateV9ToV10() error {

cv9 := &serverConfigV9{}
_, err := Load(configFile, cv9)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘9’. %w", err)
@@ -747,7 +747,7 @@ func migrateV10ToV11() error {

cv10 := &serverConfigV10{}
_, err := Load(configFile, cv10)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘10’. %w", err)
@@ -818,7 +818,7 @@ func migrateV11ToV12() error {

cv11 := &serverConfigV11{}
_, err := Load(configFile, cv11)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘11’. %w", err)
@@ -915,7 +915,7 @@ func migrateV12ToV13() error {

cv12 := &serverConfigV12{}
_, err := Load(configFile, cv12)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘12’. %w", err)
@@ -995,7 +995,7 @@ func migrateV13ToV14() error {

cv13 := &serverConfigV13{}
_, err := Load(configFile, cv13)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘13’. %w", err)
@@ -1080,7 +1080,7 @@ func migrateV14ToV15() error {

cv14 := &serverConfigV14{}
_, err := Load(configFile, cv14)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘14’. %w", err)
@@ -1170,7 +1170,7 @@ func migrateV15ToV16() error {

cv15 := &serverConfigV15{}
_, err := Load(configFile, cv15)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘15’. %w", err)
@@ -1260,7 +1260,7 @@ func migrateV16ToV17() error {

cv16 := &serverConfigV16{}
_, err := Load(configFile, cv16)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘16’. %w", err)
@@ -1381,7 +1381,7 @@ func migrateV17ToV18() error {

cv17 := &serverConfigV17{}
_, err := Load(configFile, cv17)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘17’. %w", err)
@@ -1483,7 +1483,7 @@ func migrateV18ToV19() error {

cv18 := &serverConfigV18{}
_, err := Load(configFile, cv18)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘18’. %w", err)
@@ -1589,7 +1589,7 @@ func migrateV19ToV20() error {

cv19 := &serverConfigV19{}
_, err := Load(configFile, cv19)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘18’. %w", err)
@@ -1694,7 +1694,7 @@ func migrateV20ToV21() error {

cv20 := &serverConfigV20{}
_, err := Load(configFile, cv20)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘20’. %w", err)
@@ -1798,7 +1798,7 @@ func migrateV21ToV22() error {

cv21 := &serverConfigV21{}
_, err := Load(configFile, cv21)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘21’. %w", err)
@@ -1902,7 +1902,7 @@ func migrateV22ToV23() error {

cv22 := &serverConfigV22{}
_, err := Load(configFile, cv22)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘22’. %w", err)
@@ -2015,7 +2015,7 @@ func migrateV23ToV24() error {

cv23 := &serverConfigV23{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, cv23)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘23’. %w", err)
@@ -2128,7 +2128,7 @@ func migrateV24ToV25() error {

cv24 := &serverConfigV24{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, cv24)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘24’. %w", err)
@@ -2246,7 +2246,7 @@ func migrateV25ToV26() error {

cv25 := &serverConfigV25{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, cv25)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version ‘25’. %w", err)
@@ -2368,7 +2368,7 @@ func migrateV26ToV27() error {
// in the new `logger` field
srvConfig := &serverConfigV27{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %w", err)
@@ -2401,7 +2401,7 @@ func migrateV27ToV28() error {

srvConfig := &serverConfigV28{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
if os.IsNotExist(err) || os.IsPermission(err) {
if osIsNotExist(err) || osIsPermission(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %w", err)
@@ -2452,17 +2452,17 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
var config = &serverConfigV27{}
for _, cfgFile := range configFiles {
if _, err = Load(cfgFile, config); err != nil {
if !os.IsNotExist(err) && !os.IsPermission(err) {
if !osIsNotExist(err) && !osIsPermission(err) {
return err
}
continue
}
break
}
if os.IsPermission(err) {
if osIsPermission(err) {
logger.Info("Older config found but not readable %s, proceeding to initialize new config anyways", err)
}
if os.IsNotExist(err) || os.IsPermission(err) {
|
||||
if osIsNotExist(err) || osIsPermission(err) {
|
||||
// Initialize the server config, if no config exists.
|
||||
return newSrvConfig(objAPI)
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check if config v1 is removed from filesystem
|
||||
if _, err := os.Stat(configPath); err == nil || !os.IsNotExist(err) {
|
||||
if _, err := os.Stat(configPath); err == nil || !osIsNotExist(err) {
|
||||
t.Fatal("Config V1 file is not purged")
|
||||
}
|
||||
|
||||
|
||||
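Every hunk above makes the same substitution: the stdlib checks os.IsNotExist/os.IsPermission are replaced by MinIO's own osIsNotExist/osIsPermission helpers. The helper implementations are not part of this diff; the sketch below is only an assumption of their intent, namely error checks that also match wrapped errors via errors.Is.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// osIsNotExist reports whether err means "file does not exist", even when the
// *os.PathError is wrapped (a bare os.IsNotExist does not unwrap). Hypothetical
// body; the real helper lives elsewhere in the MinIO tree.
func osIsNotExist(err error) bool {
	return errors.Is(err, os.ErrNotExist)
}

// osIsPermission reports whether err is a permission error, unwrapping as needed.
func osIsPermission(err error) bool {
	return errors.Is(err, os.ErrPermission)
}

func main() {
	_, err := os.Open("/nonexistent/minio/config.json")
	if osIsNotExist(err) || osIsPermission(err) {
		// A missing or unreadable legacy config simply means there is nothing
		// to migrate, mirroring the `return nil` branches in the hunks above.
		fmt.Println("nothing to migrate:", err)
	}
}
```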
@@ -29,15 +29,28 @@ import (

// API sub-system constants
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiReadyDeadline = "ready_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiClusterDeadline = "cluster_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRemoteTransportDeadline = "remote_transport_deadline"
apiListQuorum = "list_quorum"
apiExtendListCacheLife = "extend_list_cache_life"

EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIClusterDeadline = "MINIO_API_CLUSTER_DEADLINE"
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
EnvAPIListQuorum = "MINIO_API_LIST_QUORUM"
EnvAPIExtendListCacheLife = "MINIO_API_EXTEND_LIST_CACHE_LIFE"
EnvAPISecureCiphers = "MINIO_API_SECURE_CIPHERS"
)

// Deprecated key and ENVs
const (
apiReadyDeadline = "ready_deadline"
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
)

// DefaultKVS - default storage class config
@@ -52,22 +65,37 @@ var (
Value: "10s",
},
config.KV{
Key: apiReadyDeadline,
Key: apiClusterDeadline,
Value: "10s",
},
config.KV{
Key: apiCorsAllowOrigin,
Value: "*",
},
config.KV{
Key: apiRemoteTransportDeadline,
Value: "2h",
},
config.KV{
Key: apiListQuorum,
Value: "optimal",
},
config.KV{
Key: apiExtendListCacheLife,
Value: "0s",
},
}
)

// Config storage class configuration
type Config struct {
APIRequestsMax int `json:"requests_max"`
APIRequestsDeadline time.Duration `json:"requests_deadline"`
APIReadyDeadline time.Duration `json:"ready_deadline"`
APICorsAllowOrigin []string `json:"cors_allow_origin"`
RequestsMax int `json:"requests_max"`
RequestsDeadline time.Duration `json:"requests_deadline"`
ClusterDeadline time.Duration `json:"cluster_deadline"`
CorsAllowOrigin []string `json:"cors_allow_origin"`
RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
ListQuorum string `json:"list_strict_quorum"`
ExtendListLife time.Duration `json:"extend_list_cache_life"`
}

// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
@@ -81,8 +109,29 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &aux)
}

// GetListQuorum interprets list quorum values and returns appropriate
// acceptable quorum expected for list operations
func (sCfg Config) GetListQuorum() int {
switch sCfg.ListQuorum {
case "optimal":
return 3
case "reduced":
return 2
case "disk":
// smallest possible value, generally meant for testing.
return 1
case "strict":
return -1
}
// Defaults to 3 drives per set.
return 3
}

// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
// remove this since we have removed this already.
kvs.Delete(apiReadyDeadline)

if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
@@ -102,16 +151,37 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
return cfg, err
}

readyDeadline, err := time.ParseDuration(env.Get(EnvAPIReadyDeadline, kvs.Get(apiReadyDeadline)))
clusterDeadline, err := time.ParseDuration(env.Get(EnvAPIClusterDeadline, kvs.Get(apiClusterDeadline)))
if err != nil {
return cfg, err
}

corsAllowOrigin := strings.Split(env.Get(EnvAPICorsAllowOrigin, kvs.Get(apiCorsAllowOrigin)), ",")

remoteTransportDeadline, err := time.ParseDuration(env.Get(EnvAPIRemoteTransportDeadline, kvs.Get(apiRemoteTransportDeadline)))
if err != nil {
return cfg, err
}

listQuorum := env.Get(EnvAPIListQuorum, kvs.Get(apiListQuorum))
switch listQuorum {
case "strict", "optimal", "reduced", "disk":
default:
return cfg, errors.New("invalid value for list strict quorum")
}

listLife, err := time.ParseDuration(env.Get(EnvAPIExtendListCacheLife, kvs.Get(apiExtendListCacheLife)))
if err != nil {
return cfg, err
}

return Config{
APIRequestsMax: requestsMax,
APIRequestsDeadline: requestsDeadline,
APIReadyDeadline: readyDeadline,
APICorsAllowOrigin: corsAllowOrigin,
RequestsMax: requestsMax,
RequestsDeadline: requestsDeadline,
ClusterDeadline: clusterDeadline,
CorsAllowOrigin: corsAllowOrigin,
RemoteTransportDeadline: remoteTransportDeadline,
ListQuorum: listQuorum,
ExtendListLife: listLife,
}, nil
}
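The api sub-system gains a list_quorum key (env MINIO_API_LIST_QUORUM) and the GetListQuorum helper shown above. As a quick reference, this standalone sketch reproduces the same string-to-quorum mapping on a trimmed stand-in Config; it is illustrative only, not the real cmd/config/api type.

```go
package main

import "fmt"

// Config is a trimmed stand-in with only the field needed for the mapping.
type Config struct{ ListQuorum string }

// GetListQuorum mirrors the hunk above: "optimal" -> 3, "reduced" -> 2,
// "disk" -> 1 (testing), "strict" -> -1, with 3 drives per set as the default.
func (c Config) GetListQuorum() int {
	switch c.ListQuorum {
	case "optimal":
		return 3
	case "reduced":
		return 2
	case "disk":
		return 1
	case "strict":
		return -1
	}
	return 3
}

func main() {
	for _, q := range []string{"optimal", "reduced", "disk", "strict", ""} {
		fmt.Printf("list_quorum=%q -> quorum %d\n", q, Config{ListQuorum: q}.GetListQuorum())
	}
}
```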
@@ -39,5 +39,11 @@ var (
Optional: true,
Type: "csv",
},
config.HelpKV{
Key: apiRemoteTransportDeadline,
Description: `set the deadline for API requests on remote transports while proxying between federated instances e.g. "2h"`,
Optional: true,
Type: "duration",
},
}
)
31 cmd/config/cache/config.go vendored
@@ -28,16 +28,17 @@ import (

// Config represents cache config settings
type Config struct {
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
After int `json:"after"`
WatermarkLow int `json:"watermark_low"`
WatermarkHigh int `json:"watermark_high"`
Range bool `json:"range"`
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
After int `json:"after"`
WatermarkLow int `json:"watermark_low"`
WatermarkHigh int `json:"watermark_high"`
Range bool `json:"range"`
CommitWriteback bool `json:"-"`
}

// UnmarshalJSON - implements JSON unmarshal interface for unmarshalling
@@ -152,3 +153,13 @@ func parseCacheExcludes(excludes string) ([]string, error) {

return excludesSlice, nil
}

func parseCacheCommitMode(commitStr string) (bool, error) {
switch strings.ToLower(commitStr) {
case "writeback":
return true, nil
case "writethrough":
return false, nil
}
return false, config.ErrInvalidCacheCommitValue(nil).Msg("cache commit value must be `writeback` or `writethrough`")
}
6 cmd/config/cache/help.go vendored
@@ -74,5 +74,11 @@ var (
Optional: true,
Type: "string",
},
config.HelpKV{
Key: Commit,
Description: `set to control cache commit behavior, defaults to "writethrough"`,
Optional: true,
Type: "string",
},
}
)
17 cmd/config/cache/lookup.go vendored
@@ -35,6 +35,7 @@ const (
WatermarkLow = "watermark_low"
WatermarkHigh = "watermark_high"
Range = "range"
Commit = "commit"

EnvCacheDrives = "MINIO_CACHE_DRIVES"
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
@@ -45,6 +46,7 @@ const (
EnvCacheWatermarkLow = "MINIO_CACHE_WATERMARK_LOW"
EnvCacheWatermarkHigh = "MINIO_CACHE_WATERMARK_HIGH"
EnvCacheRange = "MINIO_CACHE_RANGE"
EnvCacheCommit = "MINIO_CACHE_COMMIT"

EnvCacheEncryptionMasterKey = "MINIO_CACHE_ENCRYPTION_MASTER_KEY"

@@ -53,6 +55,7 @@ const (
DefaultAfter = "0"
DefaultWaterMarkLow = "70"
DefaultWaterMarkHigh = "80"
DefaultCacheCommit = "writethrough"
)

// DefaultKVS - default KV settings for caching.
@@ -90,6 +93,10 @@ var (
Key: Range,
Value: config.EnableOn,
},
config.KV{
Key: Commit,
Value: DefaultCacheCommit,
},
}
)

@@ -210,6 +217,16 @@ func LookupConfig(kvs config.KVS) (Config, error) {
}
cfg.Range = rng
}
if commit := env.Get(EnvCacheCommit, kvs.Get(Commit)); commit != "" {
cfg.CommitWriteback, err = parseCacheCommitMode(commit)
if err != nil {
return cfg, err
}
if cfg.After > 0 && cfg.CommitWriteback {
err := errors.New("cache after cannot be used with commit writeback")
return cfg, config.ErrInvalidCacheSetting(err)
}
}

return cfg, nil
}
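To summarize the cache commit behaviour added above: MINIO_CACHE_COMMIT (or the commit config key) is parsed into the boolean CommitWriteback, and writeback commit is rejected when combined with a non-zero "after" setting. The following standalone sketch replays those rules on a reduced stand-in Config; error types are simplified to plain errors rather than the config.Err helpers used in the real code.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Config is a reduced stand-in for cmd/config/cache.Config with just the
// fields involved in the new commit setting.
type Config struct {
	After           int
	CommitWriteback bool
}

// parseCacheCommitMode mirrors the helper added in cmd/config/cache/config.go:
// "writeback" -> true, "writethrough" -> false, anything else is rejected.
func parseCacheCommitMode(commitStr string) (bool, error) {
	switch strings.ToLower(commitStr) {
	case "writeback":
		return true, nil
	case "writethrough":
		return false, nil
	}
	return false, errors.New("cache commit value must be `writeback` or `writethrough`")
}

func main() {
	cfg := Config{After: 0}
	var err error

	// MINIO_CACHE_COMMIT=writeback reaches LookupConfig as the string "writeback".
	if cfg.CommitWriteback, err = parseCacheCommitMode("writeback"); err != nil {
		fmt.Println("invalid commit value:", err)
		return
	}

	// As in the lookup hunk above, "after" and writeback commit are mutually exclusive.
	if cfg.After > 0 && cfg.CommitWriteback {
		fmt.Println("cache after cannot be used with commit writeback")
		return
	}
	fmt.Printf("CommitWriteback=%v\n", cfg.CommitWriteback)
}
```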
@@ -76,6 +76,8 @@ const (
|
||||
KmsKesSubSys = "kms_kes"
|
||||
LoggerWebhookSubSys = "logger_webhook"
|
||||
AuditWebhookSubSys = "audit_webhook"
|
||||
HealSubSys = "heal"
|
||||
CrawlerSubSys = "crawler"
|
||||
|
||||
// Add new constants here if you add new fields to config.
|
||||
)
|
||||
@@ -97,7 +99,7 @@ const (
|
||||
)
|
||||
|
||||
// SubSystems - all supported sub-systems
|
||||
var SubSystems = set.CreateStringSet([]string{
|
||||
var SubSystems = set.CreateStringSet(
|
||||
CredentialsSubSys,
|
||||
RegionSubSys,
|
||||
EtcdSubSys,
|
||||
@@ -112,6 +114,8 @@ var SubSystems = set.CreateStringSet([]string{
|
||||
PolicyOPASubSys,
|
||||
IdentityLDAPSubSys,
|
||||
IdentityOpenIDSubSys,
|
||||
CrawlerSubSys,
|
||||
HealSubSys,
|
||||
NotifyAMQPSubSys,
|
||||
NotifyESSubSys,
|
||||
NotifyKafkaSubSys,
|
||||
@@ -122,7 +126,15 @@ var SubSystems = set.CreateStringSet([]string{
|
||||
NotifyPostgresSubSys,
|
||||
NotifyRedisSubSys,
|
||||
NotifyWebhookSubSys,
|
||||
}...)
|
||||
)
|
||||
|
||||
// SubSystemsDynamic - all sub-systems that have dynamic config.
|
||||
var SubSystemsDynamic = set.CreateStringSet(
|
||||
APISubSys,
|
||||
CompressionSubSys,
|
||||
CrawlerSubSys,
|
||||
HealSubSys,
|
||||
)
|
||||
|
||||
// SubSystemsSingleTargets - subsystems which only support single target.
|
||||
var SubSystemsSingleTargets = set.CreateStringSet([]string{
|
||||
@@ -138,6 +150,8 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
|
||||
PolicyOPASubSys,
|
||||
IdentityLDAPSubSys,
|
||||
IdentityOpenIDSubSys,
|
||||
HealSubSys,
|
||||
CrawlerSubSys,
|
||||
}...)
|
||||
|
||||
// Constant separators
|
||||
@@ -264,6 +278,16 @@ func (kvs KVS) Get(key string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Delete - deletes the key if present from the KV list.
|
||||
func (kvs *KVS) Delete(key string) {
|
||||
for i, kv := range *kvs {
|
||||
if kv.Key == key {
|
||||
*kvs = append((*kvs)[:i], (*kvs)[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup - lookup a key in a list of KVS
|
||||
func (kvs KVS) Lookup(key string) (string, bool) {
|
||||
for _, kv := range kvs {
|
||||
@@ -296,25 +320,29 @@ func (c Config) DelFrom(r io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadFrom - implements io.ReaderFrom interface
|
||||
func (c Config) ReadFrom(r io.Reader) (int64, error) {
|
||||
// ReadConfig - read content from input and write into c.
|
||||
// Returns whether all parameters were dynamic.
|
||||
func (c Config) ReadConfig(r io.Reader) (dynOnly bool, err error) {
|
||||
var n int
|
||||
scanner := bufio.NewScanner(r)
|
||||
dynOnly = true
|
||||
for scanner.Scan() {
|
||||
// Skip any empty lines, or comment like characters
|
||||
text := scanner.Text()
|
||||
if text == "" || strings.HasPrefix(text, KvComment) {
|
||||
continue
|
||||
}
|
||||
if err := c.SetKVS(text, DefaultKVS); err != nil {
|
||||
return 0, err
|
||||
dynamic, err := c.SetKVS(text, DefaultKVS)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
dynOnly = dynOnly && dynamic
|
||||
n += len(text)
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return 0, err
|
||||
return false, err
|
||||
}
|
||||
return int64(n), nil
|
||||
return dynOnly, nil
|
||||
}
|
||||
|
||||
type configWriteTo struct {
|
||||
@@ -605,26 +633,27 @@ func (c Config) Clone() Config {
|
||||
}
|
||||
|
||||
// SetKVS - set specific key values per sub-system.
|
||||
func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
|
||||
func (c Config) SetKVS(s string, defaultKVS map[string]KVS) (dynamic bool, err error) {
|
||||
if len(s) == 0 {
|
||||
return Errorf("input arguments cannot be empty")
|
||||
return false, Errorf("input arguments cannot be empty")
|
||||
}
|
||||
inputs := strings.SplitN(s, KvSpaceSeparator, 2)
|
||||
if len(inputs) <= 1 {
|
||||
return Errorf("invalid number of arguments '%s'", s)
|
||||
return false, Errorf("invalid number of arguments '%s'", s)
|
||||
}
|
||||
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
|
||||
if len(subSystemValue) == 0 {
|
||||
return Errorf("invalid number of arguments %s", s)
|
||||
return false, Errorf("invalid number of arguments %s", s)
|
||||
}
|
||||
|
||||
if !SubSystems.Contains(subSystemValue[0]) {
|
||||
return Errorf("unknown sub-system %s", s)
|
||||
return false, Errorf("unknown sub-system %s", s)
|
||||
}
|
||||
|
||||
if SubSystemsSingleTargets.Contains(subSystemValue[0]) && len(subSystemValue) == 2 {
|
||||
return Errorf("sub-system '%s' only supports single target", subSystemValue[0])
|
||||
return false, Errorf("sub-system '%s' only supports single target", subSystemValue[0])
|
||||
}
|
||||
dynamic = SubSystemsDynamic.Contains(subSystemValue[0])
|
||||
|
||||
tgt := Default
|
||||
subSys := subSystemValue[0]
|
||||
@@ -634,7 +663,7 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
|
||||
|
||||
fields := madmin.KvFields(inputs[1], defaultKVS[subSys].Keys())
|
||||
if len(fields) == 0 {
|
||||
return Errorf("sub-system '%s' cannot have empty keys", subSys)
|
||||
return false, Errorf("sub-system '%s' cannot have empty keys", subSys)
|
||||
}
|
||||
|
||||
var kvs = KVS{}
|
||||
@@ -657,7 +686,7 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
|
||||
kvs.Set(prevK, madmin.SanitizeValue(kv[1]))
|
||||
continue
|
||||
}
|
||||
return Errorf("key '%s', cannot have empty value", kv[0])
|
||||
return false, Errorf("key '%s', cannot have empty value", kv[0])
|
||||
}
|
||||
|
||||
_, ok := kvs.Lookup(Enable)
|
||||
@@ -707,11 +736,11 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
|
||||
// Return error only if the
|
||||
// key is enabled, for state=off
|
||||
// let it be empty.
|
||||
return Errorf(
|
||||
return false, Errorf(
|
||||
"'%s' is not optional for '%s' sub-system, please check '%s' documentation",
|
||||
hkv.Key, subSys, subSys)
|
||||
}
|
||||
}
|
||||
c[subSys][tgt] = currKVS
|
||||
return nil
|
||||
return dynamic, nil
|
||||
}
|
||||
|
||||
88 cmd/config/crawler/crawler.go Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package crawler
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
// Compression environment variables
|
||||
const (
|
||||
Delay = "delay"
|
||||
MaxWait = "max_wait"
|
||||
|
||||
EnvDelay = "MINIO_CRAWLER_DELAY"
|
||||
EnvMaxWait = "MINIO_CRAWLER_MAX_WAIT"
|
||||
)
|
||||
|
||||
// Config represents the heal settings.
|
||||
type Config struct {
|
||||
// Delay is the sleep multiplier.
|
||||
Delay float64 `json:"delay"`
|
||||
// MaxWait is maximum wait time between operations
|
||||
MaxWait time.Duration
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultKVS - default KV config for heal settings
|
||||
DefaultKVS = config.KVS{
|
||||
config.KV{
|
||||
Key: Delay,
|
||||
Value: "10",
|
||||
},
|
||||
config.KV{
|
||||
Key: MaxWait,
|
||||
Value: "15s",
|
||||
},
|
||||
}
|
||||
|
||||
// Help provides help for config values
|
||||
Help = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: Delay,
|
||||
Description: `crawler delay multiplier, defaults to '10.0'`,
|
||||
Optional: true,
|
||||
Type: "float",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: MaxWait,
|
||||
Description: `maximum wait time between operations, defaults to '15s'`,
|
||||
Optional: true,
|
||||
Type: "duration",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// LookupConfig - lookup config and override with valid environment settings if any.
|
||||
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
|
||||
if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
cfg.Delay, err = strconv.ParseFloat(env.Get(EnvDelay, kvs.Get(Delay)), 64)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
cfg.MaxWait, err = time.ParseDuration(env.Get(EnvMaxWait, kvs.Get(MaxWait)))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
|
||||
"github.com/coredns/coredns/plugin/etcd/msg"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"go.etcd.io/etcd/v3/clientv3"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
// ErrNoEntriesFound - Indicates no entries were found for the given key (directory)
|
||||
|
||||
@@ -22,10 +22,12 @@ import (
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
@@ -78,28 +80,39 @@ func (c *OperatorDNS) Put(bucket string) error {
|
||||
defer cancel()
|
||||
e, err := c.endpoint(bucket, false)
|
||||
if err != nil {
|
||||
return err
|
||||
return newError(bucket, err)
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
return newError(bucket, err)
|
||||
}
|
||||
if err = c.addAuthHeader(req); err != nil {
|
||||
return err
|
||||
return newError(bucket, err)
|
||||
}
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
if derr := c.Delete(bucket); derr != nil {
|
||||
return derr
|
||||
return newError(bucket, derr)
|
||||
}
|
||||
}
|
||||
var errorStringBuilder strings.Builder
|
||||
io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
|
||||
xhttp.DrainBody(resp.Body)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("request to create the service for bucket %s, failed with status %s", bucket, resp.Status)
|
||||
errorString := errorStringBuilder.String()
|
||||
return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newError(bucket string, err error) error {
|
||||
e := Error{bucket, err}
|
||||
if strings.Contains(err.Error(), "invalid bucket name") {
|
||||
return ErrInvalidBucketName(e)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Delete - Removes DNS entries added in Put().
|
||||
func (c *OperatorDNS) Delete(bucket string) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
|
||||
|
||||
@@ -16,6 +16,22 @@
|
||||
|
||||
package dns
|
||||
|
||||
// Error - DNS related errors error.
|
||||
type Error struct {
|
||||
Bucket string
|
||||
Err error
|
||||
}
|
||||
|
||||
// ErrInvalidBucketName for buckets with invalid name
|
||||
type ErrInvalidBucketName Error
|
||||
|
||||
func (e ErrInvalidBucketName) Error() string {
|
||||
return "invalid bucket name error: " + e.Err.Error()
|
||||
}
|
||||
func (e Error) Error() string {
|
||||
return "dns related error: " + e.Err.Error()
|
||||
}
|
||||
|
||||
// Store dns record store
|
||||
type Store interface {
|
||||
Put(bucket string) error
|
||||
|
||||
@@ -19,7 +19,6 @@ package config
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
@@ -111,18 +110,16 @@ func ErrorToErr(err error) Err {
|
||||
case *net.OpError:
|
||||
return ErrPortAccess(err).Msg("Insufficient permissions to use specified port")
|
||||
}
|
||||
return ErrNoPermissionsToAccessDirFiles(err).Msg("Insufficient permissions to access path")
|
||||
} else if errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
return ErrUnexpectedDataContent(err)
|
||||
} else {
|
||||
// Failed to identify what type of error this, return a simple UI error
|
||||
return Err{msg: err.Error()}
|
||||
}
|
||||
|
||||
// Failed to identify what type of error this, return a simple UI error
|
||||
return Err{msg: err.Error()}
|
||||
}
|
||||
|
||||
// FmtError converts a fatal error message to a more clear error
|
||||
// using some colors
|
||||
func FmtError(introMsg string, err error, jsonFlag bool) string {
|
||||
|
||||
renderedTxt := ""
|
||||
uiErr := ErrorToErr(err)
|
||||
// JSON print
|
||||
|
||||
@@ -102,6 +102,17 @@ var (
|
||||
"MINIO_CACHE_RANGE: Valid expected value is `on` or `off`",
|
||||
)
|
||||
|
||||
ErrInvalidCacheCommitValue = newErrFn(
|
||||
"Invalid cache commit value",
|
||||
"Please check the passed value",
|
||||
"MINIO_CACHE_COMMIT: Valid expected value is `writeback` or `writethrough`",
|
||||
)
|
||||
|
||||
ErrInvalidCacheSetting = newErrFn(
|
||||
"Incompatible cache setting",
|
||||
"Please check the passed value",
|
||||
"MINIO_CACHE_AFTER cannot be used with MINIO_CACHE_COMMIT setting",
|
||||
)
|
||||
ErrInvalidRotatingCredentialsBackendEncrypted = newErrFn(
|
||||
"Invalid rotating credentials",
|
||||
"Please set correct rotating credentials in the environment for decryption",
|
||||
@@ -205,12 +216,6 @@ Example 1:
|
||||
`Use 'sudo setcap cap_net_bind_service=+ep /path/to/minio' to provide sufficient permissions`,
|
||||
)
|
||||
|
||||
ErrNoPermissionsToAccessDirFiles = newErrFn(
|
||||
"Missing permissions to access the specified path",
|
||||
"Please ensure the specified path can be accessed",
|
||||
"",
|
||||
)
|
||||
|
||||
ErrSSLUnexpectedError = newErrFn(
|
||||
"Invalid TLS certificate",
|
||||
"Please check the content of your certificate data",
|
||||
@@ -247,12 +252,6 @@ Example 1:
|
||||
"",
|
||||
)
|
||||
|
||||
ErrUnexpectedDataContent = newErrFn(
|
||||
"Unexpected data content",
|
||||
"Please contact MinIO at https://slack.min.io",
|
||||
"",
|
||||
)
|
||||
|
||||
ErrUnexpectedError = newErrFn(
|
||||
"Unexpected error",
|
||||
"Please contact MinIO at https://slack.min.io",
|
||||
|
||||
@@ -25,8 +25,8 @@ import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"go.etcd.io/etcd/v3/clientv3"
|
||||
"go.etcd.io/etcd/v3/clientv3/namespace"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.etcd.io/etcd/clientv3/namespace"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
106 cmd/config/heal/heal.go Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package heal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
// Compression environment variables
|
||||
const (
|
||||
Bitrot = "bitrotscan"
|
||||
Sleep = "max_sleep"
|
||||
IOCount = "max_io"
|
||||
|
||||
EnvBitrot = "MINIO_HEAL_BITROTSCAN"
|
||||
EnvSleep = "MINIO_HEAL_MAX_SLEEP"
|
||||
EnvIOCount = "MINIO_HEAL_MAX_IO"
|
||||
)
|
||||
|
||||
// Config represents the heal settings.
|
||||
type Config struct {
|
||||
// Bitrot will perform bitrot scan on local disk when checking objects.
|
||||
Bitrot bool `json:"bitrotscan"`
|
||||
// maximum sleep duration between objects to slow down heal operation.
|
||||
Sleep time.Duration `json:"sleep"`
|
||||
IOCount int `json:"iocount"`
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultKVS - default KV config for heal settings
|
||||
DefaultKVS = config.KVS{
|
||||
config.KV{
|
||||
Key: Bitrot,
|
||||
Value: config.EnableOff,
|
||||
},
|
||||
config.KV{
|
||||
Key: Sleep,
|
||||
Value: "1s",
|
||||
},
|
||||
config.KV{
|
||||
Key: IOCount,
|
||||
Value: "10",
|
||||
},
|
||||
}
|
||||
|
||||
// Help provides help for config values
|
||||
Help = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: Bitrot,
|
||||
Description: `perform bitrot scan on disks when checking objects during crawl`,
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: Sleep,
|
||||
Description: `maximum sleep duration between objects to slow down heal operation. eg. 2s`,
|
||||
Optional: true,
|
||||
Type: "duration",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: IOCount,
|
||||
Description: `maximum IO requests allowed between objects to slow down heal operation. eg. 3`,
|
||||
Optional: true,
|
||||
Type: "int",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// LookupConfig - lookup config and override with valid environment settings if any.
|
||||
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
|
||||
if err = config.CheckValidKeys(config.HealSubSys, kvs, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
cfg.Bitrot, err = config.ParseBool(env.Get(EnvBitrot, kvs.Get(Bitrot)))
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("'heal:bitrotscan' value invalid: %w", err)
|
||||
}
|
||||
cfg.Sleep, err = time.ParseDuration(env.Get(EnvSleep, kvs.Get(Sleep)))
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("'heal:max_sleep' value invalid: %w", err)
|
||||
}
|
||||
cfg.IOCount, err = strconv.Atoi(env.Get(EnvIOCount, kvs.Get(IOCount)))
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("'heal:max_io' value invalid: %w", err)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
@@ -340,6 +340,12 @@ var (
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresMaxOpenConnections,
|
||||
Description: "To set the maximum number of open connections to the database. The value is set to `2` by default.",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
}
|
||||
|
||||
HelpMySQL = config.HelpKVS{
|
||||
@@ -377,6 +383,12 @@ var (
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLMaxOpenConnections,
|
||||
Description: "To set the maximum number of open connections to the database. The value is set to `2` by default.",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
}
|
||||
|
||||
HelpNATS = config.HelpKVS{
|
||||
|
||||
@@ -357,6 +357,10 @@ func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArg
|
||||
Key: target.PostgresQueueLimit,
|
||||
Value: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
},
|
||||
config.KV{
|
||||
Key: target.PostgresMaxOpenConnections,
|
||||
Value: strconv.Itoa(cfg.MaxOpenConnections),
|
||||
},
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -554,6 +558,10 @@ func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error
|
||||
Key: target.MySQLQueueLimit,
|
||||
Value: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
},
|
||||
config.KV{
|
||||
Key: target.MySQLMaxOpenConnections,
|
||||
Value: strconv.Itoa(cfg.MaxOpenConnections),
|
||||
},
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -807,6 +807,10 @@ var (
|
||||
Key: target.MySQLQueueLimit,
|
||||
Value: "0",
|
||||
},
|
||||
config.KV{
|
||||
Key: target.MySQLMaxOpenConnections,
|
||||
Value: "2",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -855,13 +859,25 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
|
||||
if k != config.Default {
|
||||
queueDirEnv = queueDirEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnectionsEnv := target.EnvMySQLMaxOpenConnections
|
||||
if k != config.Default {
|
||||
maxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.MySQLMaxOpenConnections)))
|
||||
if cErr != nil {
|
||||
return nil, cErr
|
||||
}
|
||||
|
||||
mysqlArgs := target.MySQLArgs{
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.MySQLFormat)),
|
||||
DSN: env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.MySQLTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
|
||||
QueueLimit: queueLimit,
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.MySQLFormat)),
|
||||
DSN: env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.MySQLTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
|
||||
QueueLimit: queueLimit,
|
||||
MaxOpenConnections: maxOpenConnections,
|
||||
}
|
||||
if err = mysqlArgs.Validate(); err != nil {
|
||||
return nil, err
|
||||
@@ -1235,6 +1251,10 @@ var (
|
||||
Key: target.PostgresQueueLimit,
|
||||
Value: "0",
|
||||
},
|
||||
config.KV{
|
||||
Key: target.PostgresMaxOpenConnections,
|
||||
Value: "2",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1285,13 +1305,24 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
|
||||
queueDirEnv = queueDirEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnectionsEnv := target.EnvPostgresMaxOpenConnections
|
||||
if k != config.Default {
|
||||
maxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.PostgresMaxOpenConnections)))
|
||||
if cErr != nil {
|
||||
return nil, cErr
|
||||
}
|
||||
|
||||
psqlArgs := target.PostgreSQLArgs{
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.PostgresFormat)),
|
||||
ConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.PostgresTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
|
||||
QueueLimit: uint64(queueLimit),
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.PostgresFormat)),
|
||||
ConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.PostgresTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
|
||||
QueueLimit: uint64(queueLimit),
|
||||
MaxOpenConnections: maxOpenConnections,
|
||||
}
|
||||
if err = psqlArgs.Validate(); err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -33,6 +33,12 @@ var (
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: ClassDMA,
|
||||
Description: `enable O_DIRECT for both read and write, defaults to "write" e.g. "read+write"`,
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
|
||||
@@ -18,6 +18,7 @@ package storageclass
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -32,17 +33,26 @@ const (
|
||||
RRS = "REDUCED_REDUNDANCY"
|
||||
// Standard storage class
|
||||
STANDARD = "STANDARD"
|
||||
// DMA storage class
|
||||
DMA = "DMA"
|
||||
|
||||
// Valid values are "write" and "read+write"
|
||||
DMAWrite = "write"
|
||||
DMAReadWrite = "read+write"
|
||||
)
|
||||
|
||||
// Standard constats for config info storage class
|
||||
const (
|
||||
ClassStandard = "standard"
|
||||
ClassRRS = "rrs"
|
||||
ClassDMA = "dma"
|
||||
|
||||
// Reduced redundancy storage class environment variable
|
||||
RRSEnv = "MINIO_STORAGE_CLASS_RRS"
|
||||
// Standard storage class environment variable
|
||||
StandardEnv = "MINIO_STORAGE_CLASS_STANDARD"
|
||||
// DMA storage class environment variable
|
||||
DMAEnv = "MINIO_STORAGE_CLASS_DMA"
|
||||
|
||||
// Supported storage class scheme is EC
|
||||
schemePrefix = "EC"
|
||||
@@ -52,6 +62,9 @@ const (
|
||||
|
||||
// Default RRS parity is always minimum parity.
|
||||
defaultRRSParity = minParityDisks
|
||||
|
||||
// Default DMA value
|
||||
defaultDMA = DMAWrite
|
||||
)
|
||||
|
||||
// DefaultKVS - default storage class config
|
||||
@@ -65,18 +78,24 @@ var (
|
||||
Key: ClassRRS,
|
||||
Value: "EC:2",
|
||||
},
|
||||
config.KV{
|
||||
Key: ClassDMA,
|
||||
Value: defaultDMA,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// StorageClass - holds storage class information
|
||||
type StorageClass struct {
|
||||
Parity int
|
||||
DMA string
|
||||
}
|
||||
|
||||
// Config storage class configuration
|
||||
type Config struct {
|
||||
Standard StorageClass `json:"standard"`
|
||||
RRS StorageClass `json:"rrs"`
|
||||
DMA StorageClass `json:"dma"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
|
||||
@@ -93,7 +112,7 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {
|
||||
// IsValid - returns true if input string is a valid
|
||||
// storage class kind supported.
|
||||
func IsValid(sc string) bool {
|
||||
return sc == RRS || sc == STANDARD
|
||||
return sc == RRS || sc == STANDARD || sc == DMA
|
||||
}
|
||||
|
||||
// UnmarshalText unmarshals storage class from its textual form into
|
||||
@@ -103,6 +122,14 @@ func (sc *StorageClass) UnmarshalText(b []byte) error {
|
||||
if scStr == "" {
|
||||
return nil
|
||||
}
|
||||
if scStr == DMAWrite {
|
||||
sc.DMA = DMAWrite
|
||||
return nil
|
||||
}
|
||||
if scStr == DMAReadWrite {
|
||||
sc.DMA = DMAReadWrite
|
||||
return nil
|
||||
}
|
||||
s, err := parseStorageClass(scStr)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -116,14 +143,14 @@ func (sc *StorageClass) MarshalText() ([]byte, error) {
|
||||
if sc.Parity != 0 {
|
||||
return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil
|
||||
}
|
||||
return []byte(""), nil
|
||||
return []byte(sc.DMA), nil
|
||||
}
|
||||
|
||||
func (sc *StorageClass) String() string {
|
||||
if sc.Parity != 0 {
|
||||
return fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)
|
||||
}
|
||||
return ""
|
||||
return sc.DMA
|
||||
}
|
||||
|
||||
// Parses given storageClassEnv and returns a storageClass structure.
|
||||
@@ -212,6 +239,11 @@ func (sCfg Config) GetParityForSC(sc string) (parity int) {
|
||||
}
|
||||
}
|
||||
|
||||
// GetDMA - returns DMA configuration.
|
||||
func (sCfg Config) GetDMA() string {
|
||||
return sCfg.DMA.DMA
|
||||
}
|
||||
|
||||
// Enabled returns if etcd is enabled.
|
||||
func Enabled(kvs config.KVS) bool {
|
||||
ssc := kvs.Get(ClassStandard)
|
||||
@@ -231,6 +263,7 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
|
||||
|
||||
ssc := env.Get(StandardEnv, kvs.Get(ClassStandard))
|
||||
rrsc := env.Get(RRSEnv, kvs.Get(ClassRRS))
|
||||
dma := env.Get(DMAEnv, kvs.Get(ClassDMA))
|
||||
// Check for environment variables and parse into storageClass struct
|
||||
if ssc != "" {
|
||||
cfg.Standard, err = parseStorageClass(ssc)
|
||||
@@ -252,6 +285,14 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
|
||||
cfg.RRS.Parity = defaultRRSParity
|
||||
}
|
||||
|
||||
if dma == "" {
|
||||
dma = defaultDMA
|
||||
}
|
||||
if dma != DMAReadWrite && dma != DMAWrite {
|
||||
return Config{}, errors.New(`valid dma values are "read-write" and "write"`)
|
||||
}
|
||||
cfg.DMA.DMA = dma
|
||||
|
||||
// Validation is done after parsing both the storage classes. This is needed because we need one
|
||||
// storage class value to deduce the correct value of the other storage class.
|
||||
if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
|
||||
|
||||
@@ -40,8 +40,8 @@ type HTTPConsoleLoggerSys struct {
|
||||
logBuf *ring.Ring
|
||||
}
|
||||
|
||||
func mustGetNodeName(endpointZones EndpointZones) (nodeName string) {
|
||||
host, err := xnet.ParseHost(GetLocalPeer(endpointZones))
|
||||
func mustGetNodeName(endpointServerPools EndpointServerPools) (nodeName string) {
|
||||
host, err := xnet.ParseHost(GetLocalPeer(endpointServerPools))
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to start console logging subsystem")
|
||||
}
|
||||
@@ -63,8 +63,8 @@ func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
|
||||
}
|
||||
|
||||
// SetNodeName - sets the node name if any after distributed setup has initialized
|
||||
func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointZones EndpointZones) {
|
||||
sys.nodeName = mustGetNodeName(endpointZones)
|
||||
func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointServerPools EndpointServerPools) {
|
||||
sys.nodeName = mustGetNodeName(endpointServerPools)
|
||||
}
|
||||
|
||||
// HasLogListeners returns true if console log listeners are registered
|
||||
@@ -122,6 +122,34 @@ func (sys *HTTPConsoleLoggerSys) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoint - dummy function for interface compatibility
|
||||
func (sys *HTTPConsoleLoggerSys) Endpoint() string {
|
||||
return sys.console.Endpoint()
|
||||
}
|
||||
|
||||
// String - stringer function for interface compatibility
|
||||
func (sys *HTTPConsoleLoggerSys) String() string {
|
||||
return "console+http"
|
||||
}
|
||||
|
||||
// Content returns the console stdout log
|
||||
func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
|
||||
sys.RLock()
|
||||
sys.logBuf.Do(func(p interface{}) {
|
||||
if p != nil {
|
||||
lg, ok := p.(log.Info)
|
||||
if ok {
|
||||
if (lg.Entry != log.Entry{}) {
|
||||
logs = append(logs, lg.Entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
sys.RUnlock()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Send log message 'e' to console and publish to console
|
||||
// log pubsub system
|
||||
func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
|
||||
|
||||
@@ -177,11 +177,10 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
|
||||
// named key referenced by keyID. It also binds the generated key
|
||||
// cryptographically to the provided context.
|
||||
func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
|
||||
var context bytes.Buffer
|
||||
ctx.WriteTo(&context)
|
||||
context := ctx.AppendTo(make([]byte, 0, 128))
|
||||
|
||||
var plainKey []byte
|
||||
plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context.Bytes())
|
||||
plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)
|
||||
if err != nil {
|
||||
return key, nil, err
|
||||
}
|
||||
@@ -200,11 +199,10 @@ func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
|
||||
// The context must be same context as the one provided while
|
||||
// generating the plaintext key / sealedKey.
|
||||
func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
|
||||
var context bytes.Buffer
|
||||
ctx.WriteTo(&context)
|
||||
context := ctx.AppendTo(make([]byte, 0, 128))
|
||||
|
||||
var plainKey []byte
|
||||
plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context.Bytes())
|
||||
plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)
|
||||
if err != nil {
|
||||
return key, err
|
||||
}
|
||||
@@ -415,7 +413,7 @@ func (c *kesClient) postRetry(path string, body io.ReadSeeker, limit int64) (io.
|
||||
}
|
||||
|
||||
// If the error is not temp. / retryable => fail the request immediately.
|
||||
if !xnet.IsNetworkOrHostDown(err) &&
|
||||
if !xnet.IsNetworkOrHostDown(err, false) &&
|
||||
!errors.Is(err, io.EOF) &&
|
||||
!errors.Is(err, io.ErrUnexpectedEOF) &&
|
||||
!errors.Is(err, context.DeadlineExceeded) {
|
||||
|
||||
@@ -103,7 +103,6 @@ func (key ObjectKey) Seal(extKey, iv [32]byte, domain, bucket, object string) Se
|
||||
func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucket, object string) error {
|
||||
var (
|
||||
unsealConfig sio.Config
|
||||
decryptedKey bytes.Buffer
|
||||
)
|
||||
switch sealedKey.Algorithm {
|
||||
default:
|
||||
@@ -122,10 +121,9 @@ func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucke
|
||||
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
|
||||
}
|
||||
|
||||
if n, err := sio.Decrypt(&decryptedKey, bytes.NewReader(sealedKey.Key[:]), unsealConfig); n != 32 || err != nil {
|
||||
if out, err := sio.DecryptBuffer(key[:0], sealedKey.Key[:], unsealConfig); len(out) != 32 || err != nil {
|
||||
return ErrSecretKeyMismatch
|
||||
}
|
||||
copy(key[:], decryptedKey.Bytes())
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -165,11 +163,7 @@ func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
|
||||
if !IsETagSealed(etag) {
|
||||
return etag, nil
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte("SSE-etag"))
|
||||
if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buffer.Bytes(), nil
|
||||
return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil)})
|
||||
}
|
||||
|
||||
@@ -39,6 +39,8 @@ type Context map[string]string
|
||||
//
|
||||
// WriteTo sorts the context keys and writes the sorted
|
||||
// key-value pairs as canonical JSON object to w.
|
||||
//
|
||||
// Note that neither keys nor values are escaped for JSON.
|
||||
func (c Context) WriteTo(w io.Writer) (n int64, err error) {
|
||||
sortedKeys := make(sort.StringSlice, 0, len(c))
|
||||
for k := range c {
|
||||
@@ -67,6 +69,53 @@ func (c Context) WriteTo(w io.Writer) (n int64, err error) {
|
||||
return n + int64(nn), err
|
||||
}
|
||||
|
||||
// AppendTo appends the context in a canonical from to dst.
|
||||
//
|
||||
// AppendTo sorts the context keys and writes the sorted
|
||||
// key-value pairs as canonical JSON object to w.
|
||||
//
|
||||
// Note that neither keys nor values are escaped for JSON.
|
||||
func (c Context) AppendTo(dst []byte) (output []byte) {
|
||||
if len(c) == 0 {
|
||||
return append(dst, '{', '}')
|
||||
}
|
||||
|
||||
// out should not escape.
|
||||
out := bytes.NewBuffer(dst)
|
||||
|
||||
// No need to copy+sort
|
||||
if len(c) == 1 {
|
||||
for k, v := range c {
|
||||
out.WriteString(`{"`)
|
||||
out.WriteString(k)
|
||||
out.WriteString(`":"`)
|
||||
out.WriteString(v)
|
||||
out.WriteString(`"}`)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
sortedKeys := make([]string, 0, len(c))
|
||||
for k := range c {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
sort.Strings(sortedKeys)
|
||||
|
||||
out.WriteByte('{')
|
||||
for i, k := range sortedKeys {
|
||||
out.WriteByte('"')
|
||||
out.WriteString(k)
|
||||
out.WriteString(`":"`)
|
||||
out.WriteString(c[k])
|
||||
out.WriteByte('"')
|
||||
if i < len(sortedKeys)-1 {
|
||||
out.WriteByte(',')
|
||||
}
|
||||
}
|
||||
out.WriteByte('}')
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
// KMS represents an active and authenticted connection
|
||||
// to a Key-Management-Service. It supports generating
|
||||
// data key generation and unsealing of KMS-generated
|
||||
@@ -155,13 +204,12 @@ func (kms *masterKeyKMS) Info() (info KMSInfo) {
|
||||
|
||||
func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
|
||||
var (
|
||||
buffer bytes.Buffer
|
||||
derivedKey = kms.deriveKey(keyID, ctx)
|
||||
)
|
||||
if n, err := sio.Decrypt(&buffer, bytes.NewReader(sealedKey), sio.Config{Key: derivedKey[:]}); err != nil || n != 32 {
|
||||
out, err := sio.DecryptBuffer(key[:0], sealedKey, sio.Config{Key: derivedKey[:]})
|
||||
if err != nil || len(out) != 32 {
|
||||
return key, err // TODO(aead): upgrade sio to use sio.Error
|
||||
}
|
||||
copy(key[:], buffer.Bytes())
|
||||
return key, nil
|
||||
}
|
||||
|
||||
@@ -171,7 +219,7 @@ func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte)
|
||||
}
|
||||
mac := hmac.New(sha256.New, kms.masterKey[:])
|
||||
mac.Write([]byte(keyID))
|
||||
context.WriteTo(mac)
|
||||
mac.Write(context.AppendTo(make([]byte, 0, 128)))
|
||||
mac.Sum(key[:0])
|
||||
return key
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ package crypto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -83,3 +84,32 @@ func TestContextWriteTo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContextAppendTo(t *testing.T) {
|
||||
for i, test := range contextWriteToTests {
|
||||
dst := make([]byte, 0, 1024)
|
||||
dst = test.Context.AppendTo(dst)
|
||||
if s := string(dst); s != test.ExpectedJSON {
|
||||
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
|
||||
}
|
||||
// Append one more
|
||||
dst = test.Context.AppendTo(dst)
|
||||
if s := string(dst); s != test.ExpectedJSON+test.ExpectedJSON {
|
||||
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON+test.ExpectedJSON)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkContext_AppendTo(b *testing.B) {
|
||||
tests := []Context{{}, {"bucket": "warp-benchmark-bucket"}, {"0": "1", "-": "2", ".": "#"}, {"34trg": "dfioutr89", "ikjfdghkjf": "jkedfhgfjkhg", "sdfhsdjkh": "if88889", "asddsirfh804": "kjfdshgdfuhgfg78-45604586#$%"}}
|
||||
for _, test := range tests {
|
||||
b.Run(fmt.Sprintf("%d-elems", len(test)), func(b *testing.B) {
|
||||
dst := make([]byte, 0, 1024)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
dst = test.AppendTo(dst[:0])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,15 +204,17 @@ func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte
|
||||
}
|
||||
|
||||
// Check whether all extracted values are well-formed
|
||||
iv, err := base64.StdEncoding.DecodeString(b64IV)
|
||||
if err != nil || len(iv) != 32 {
|
||||
var iv [32]byte
|
||||
n, err := base64.StdEncoding.Decode(iv[:], []byte(b64IV))
|
||||
if err != nil || n != 32 {
|
||||
return keyID, kmsKey, sealedKey, errInvalidInternalIV
|
||||
}
|
||||
if algorithm != SealAlgorithm {
|
||||
return keyID, kmsKey, sealedKey, errInvalidInternalSealAlgorithm
|
||||
}
|
||||
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
|
||||
if err != nil || len(encryptedKey) != 64 {
|
||||
var encryptedKey [64]byte
|
||||
n, err = base64.StdEncoding.Decode(encryptedKey[:], []byte(b64SealedKey))
|
||||
if err != nil || n != 64 {
|
||||
return keyID, kmsKey, sealedKey, Errorf("The internal sealed key for SSE-S3 is invalid")
|
||||
}
|
||||
if idPresent && kmsKeyPresent { // We are using a KMS -> parse the sealed KMS data key.
|
||||
@@ -223,8 +225,8 @@ func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte
|
||||
}
|
||||
|
||||
sealedKey.Algorithm = algorithm
|
||||
copy(sealedKey.IV[:], iv)
|
||||
copy(sealedKey.Key[:], encryptedKey)
|
||||
sealedKey.IV = iv
|
||||
sealedKey.Key = encryptedKey
|
||||
return keyID, kmsKey, sealedKey, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -224,11 +223,10 @@ func (v *vaultService) CreateKey(keyID string) error {
|
||||
// named key referenced by keyID. It also binds the generated key
|
||||
// cryptographically to the provided context.
|
||||
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
context := ctx.AppendTo(make([]byte, 0, 128))
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
"context": base64.StdEncoding.EncodeToString(context),
|
||||
}
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
|
||||
if err != nil {
|
||||
@@ -260,12 +258,11 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
|
||||
// The context must be same context as the one provided while
|
||||
// generating the plaintext key / sealedKey.
|
||||
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
context := ctx.AppendTo(make([]byte, 0, 128))
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"ciphertext": string(sealedKey),
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
"context": base64.StdEncoding.EncodeToString(context),
|
||||
}
|
||||
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
|
||||
@@ -294,12 +291,11 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
|
||||
// The context must be same context as the one provided while
|
||||
// generating the plaintext key / sealedKey.
|
||||
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
context := ctx.AppendTo(make([]byte, 0, 128))
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"ciphertext": string(sealedKey),
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
"context": base64.StdEncoding.EncodeToString(context),
|
||||
}
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
|
||||
if err != nil {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.