Compare commits
325 commits
RELEASE.20... ... RELEASE.20...
| SHA1 |
|---|
| b3ebc69034 |
| 761dde2f1b |
| 2bb6a3f4d0 |
| e83e947ca3 |
| 73733a8fb9 |
| ce6c23a360 |
| 99d8e6a30f |
| 2fa1d8ac48 |
| ca7e425ce8 |
| 8b9a19eef1 |
| 7f629df4d5 |
| 98ddc3596c |
| 5d23be6242 |
| d15d3a524b |
| 1e1d9acb1b |
| 55ee94bed0 |
| d228d29944 |
| 013cc66d8e |
| c7ed6eee5e |
| 8082d1fed6 |
| d2a10dbe69 |
| 14645142db |
| f34b2ef90b |
| ce894665a8 |
| 0d00f3a55b |
| 48ff373ff7 |
| e9efee0e64 |
| 4b3e7aee0b |
| dd53b287f2 |
| 21526efe51 |
| 7413045f0e |
| d76c508566 |
| 8fb46de5e4 |
| af1944f28d |
| 214ea14f29 |
| 5fb420c703 |
| 2420f6c000 |
| b0d7332a0c |
| f71b56a5d0 |
| d55efc791f |
| f63645546d |
| e2dd3e3587 |
| 27ab780317 |
| e2d4d097e7 |
| ac8cb6ba0d |
| 4ce81fd07f |
| df9eeb7f8f |
| 31c4fdbf79 |
| 48e367ff7d |
| fd02492cb7 |
| addfa35d93 |
| 5afdc56796 |
| fb1c333a83 |
| c3e1da8e04 |
| 20a753e2e5 |
| 3a398775fb |
| 61a7434379 |
| 09f5e29327 |
| 29edb4ccfe |
| 197d6fb644 |
| d4e565e595 |
| 1fce2b180f |
| be6ccd129d |
| f7cecf0945 |
| 7b2198f7e5 |
| 52221db7ef |
| befbf48563 |
| 56a61bab56 |
| d480022711 |
| f1abb92f0c |
| 5792be71fa |
| c2630bb3a3 |
| 5e3010d455 |
| ccbf65c8e8 |
| 9d07cde385 |
| 464b9d7c80 |
| 5c81d0d89a |
| 62cd643868 |
| c0bf02b8b2 |
| 1b7dd70f72 |
| 372a08be49 |
| fd46a1c3b3 |
| 5aae7178ad |
| dea8220eee |
| a4be0b88f6 |
| d8101573be |
| 41cdb357bb |
| 38caddffe7 |
| 80fe166902 |
| 0e26f983d6 |
| fc08fcab52 |
| 77dc99e71d |
| 5041bfcb5c |
| 5be76856bd |
| 2a3f5e1ad1 |
| f8650a3493 |
| 90a52a29c5 |
| 8859c92f80 |
| 01e5632949 |
| 18a4276e25 |
| c06032f35f |
| 95a6b2c991 |
| ee28f6caaa |
| 30c9e50701 |
| 9aadd725d2 |
| 6cfb1cb6fd |
| 2dc8ac1e62 |
| e952e2a691 |
| 040ac5cad8 |
| 05685863e3 |
| d324c0a1c3 |
| 03f8b25b50 |
| b0e2c2da78 |
| f28a8eca91 |
| ca69e54cb6 |
| 4629abd5a2 |
| dc99f4a7a3 |
| 389ec21d0c |
| 9341201132 |
| 88dd83a365 |
| 74285d50c4 |
| 60d0611ac2 |
| f939222942 |
| c293c2e9a3 |
| e34ca9acd1 |
| 83071a3459 |
| edf364bf21 |
| 1e037883b0 |
| d909f167ff |
| 4592aaa3e2 |
| 95d1a12422 |
| 62aa42cccf |
| 5cffd3780a |
| def75ffcfe |
| ad8e611098 |
| 3ec1844e4a |
| 523670ba0d |
| 35dea24ffd |
| e55104a155 |
| 111745c564 |
| c7df1ffc6f |
| 2b7e75e079 |
| bcdaa09c75 |
| 2fc65dcb99 |
| 44a3b58e52 |
| 0a256053ee |
| 46de9ac03e |
| a53dc1d9c8 |
| f0462322fd |
| c59d2a6288 |
| 3e3ff2a70b |
| 16bc11e72e |
| 2719f1efaa |
| cff1be0ae8 |
| 39ac62a1a1 |
| f427dbbd60 |
| c3f689a7d9 |
| 85f3a9f3b0 |
| 13ba4b433d |
| 96f27a4965 |
| 0e502899a8 |
| 424b44c247 |
| 01a71c366d |
| 990fbeb3a4 |
| 5a9a898ba2 |
| fe1fbe0005 |
| c56a139fdc |
| df50eda811 |
| f5d3313210 |
| 97fcc9ff99 |
| 8a6b2b4447 |
| 757eaeae92 |
| b7dd61f6bc |
| d2a95a04a4 |
| 0cc993f403 |
| 3a64580663 |
| d087e28dce |
| 96adfaebe1 |
| ddf84f8257 |
| 507f993075 |
| 73a6a60785 |
| cf4cf58faf |
| 6bc3c74c0c |
| 598ce1e354 |
| 4685b76e08 |
| 78c9109f6c |
| 7e248fc0ba |
| c526fa9119 |
| 520e0fd985 |
| 54a7eba358 |
| 1494ba2e6e |
| a5b3548ede |
| 8318aa0113 |
| e69c42956b |
| 53ca589c11 |
| ca8ff8718e |
| e8e48e4c4a |
| 67e17ed3f8 |
| 2a6a40e93b |
| eda34423d7 |
| 7ce1f6e736 |
| 5c53620a72 |
| 5f94cec1e2 |
| 646350fa7f |
| e162a055cc |
| 28d3ad3ada |
| 0bd44a7764 |
| 8be6d887e2 |
| 66b14a0d32 |
| 153a612253 |
| 1a1b55e133 |
| 879de20edf |
| e77ad3f9bb |
| 4ce86ff5fa |
| e290c010e6 |
| 33d267fa1b |
| 601a744159 |
| f630d7c3fa |
| 91bfefcf8c |
| a1b01e6d5f |
| 48594617b5 |
| b35b9dcff7 |
| a3e317773a |
| 16431d222c |
| ee49a23220 |
| a9eef521ec |
| 901d33b59c |
| 255116fde7 |
| 00ebea2536 |
| dedf9774c7 |
| 6b1c62133d |
| d4251b2545 |
| b9d1698d74 |
| 7c696e1cb6 |
| 165d60421d |
| c7962118f8 |
| 892a204013 |
| 0e6aedc7ed |
| c547a4d835 |
| fc9668baa5 |
| ba17d46f15 |
| 54a4f93854 |
| bdd816488d |
| 36dcfee2f7 |
| 4d13ddf6b3 |
| 9e25475475 |
| e955aa7f2a |
| 81d2b54dfd |
| 7956ff0313 |
| 9ff25fb64b |
| 04df69f633 |
| 908eb57795 |
| ecfae074dc |
| be5d394e56 |
| 849a27ee61 |
| 062f3ea43a |
| 5cfedcfe33 |
| 4d2fc530d0 |
| 3970204009 |
| f046f557fa |
| 401958938d |
| 566cffe53d |
| 028bc2f9be |
| 813d9bc316 |
| 79ba458051 |
| cf220be9b5 |
| c433572585 |
| a42b576382 |
| fb9b53026d |
| 2ac54e5a7b |
| 8eecdc6d1f |
| 50577e2bd2 |
| 7bc1f986e8 |
| d796621ccc |
| 751e9fb7be |
| f6113264f4 |
| a3534a730b |
| 7f8b8a0e43 |
| bd6f7b6d83 |
| b0a4beb66a |
| 472c2d828c |
| 01ee49045e |
| 7bd9f821dd |
| b20ecc7b54 |
| 61eb9d4e29 |
| 43eb5a001c |
| f58692abb7 |
| c1760fb764 |
| e9bc0e7e98 |
| ffcadcd99e |
| 7a733a8d54 |
| ce97313fda |
| 7b81967a3c |
| ff811f594b |
| 0bf80b3c89 |
| ae3b369fe1 |
| 77b15e7194 |
| 20537f974e |
| 4476a64bdf |
| d4b701576e |
| 721c053712 |
| e3071157f0 |
| c07af89e48 |
| 9c846106fa |
| cf94d1f1f1 |
| 6187440f35 |
| 57b7c3494f |
| dda18c28c5 |
| f8d6eaaa96 |
| 47d4fabb58 |
| 80039f60d5 |
| 5a5e9b8a89 |
| b7ed3b77bd |
| 75b925c326 |
| 91d419ee6c |
| 23345098ea |
| 7ce91ea1a1 |
| 41079f1015 |
| 712dfa40cd |
| decfd6108c |
| b890bbfa63 |
| 0e3a570b85 |
| 7060c809c0 |
| 9dbfd84c5b |
| fce380a044 |
5  .github/markdown-lint-cfg.yaml  (vendored, new file)

@@ -0,0 +1,5 @@
+# Config file for markdownlint-cli
+MD033:
+  allowed_elements:
+    - details
+    - summary
10  .github/workflows/go-cross.yml  (vendored)

@@ -11,19 +11,23 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Build Tests with Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.17.x, 1.18.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+      - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
51  .github/workflows/go-fips.yml  (vendored, new file)

@@ -0,0 +1,51 @@
+name: FIPS Build Test
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.17.11b7, 1.18.3b7]
+        os: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Setup dockerfile for build test
+        run: |
+          echo "FROM us-docker.pkg.dev/google.com/api-project-999119582588/go-boringcrypto/golang:${{ matrix.go-version }}" > Dockerfile.fips.test
+          echo "COPY . /minio" >> Dockerfile.fips.test
+          echo "WORKDIR /minio" >> Dockerfile.fips.test
+          echo "RUN make" >> Dockerfile.fips.test
+
+      - name: Build
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          file: Dockerfile.fips.test
+          push: false
+          load: true
+          tags: minio/fips-test:latest
+
+      # This should fail if grep returns non-zero exit
+      - name: Test binary
+        run: |
+          docker run --rm minio/fips-test:latest ./minio --version
+          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio' | grep -q FIPS
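The two `docker run` steps above are the actual FIPS gate: the build must produce a binary whose symbol table contains BoringCrypto's FIPS symbols. A minimal local reproduction of the same check, assuming Docker and pull access to the BoringCrypto base image referenced in the workflow matrix:

# Sketch: reproduce the CI FIPS check locally (the image tag is taken from
# the workflow matrix above and may change between releases).
cat > Dockerfile.fips.test <<'EOF'
FROM us-docker.pkg.dev/google.com/api-project-999119582588/go-boringcrypto/golang:1.18.3b7
COPY . /minio
WORKDIR /minio
RUN make
EOF
docker build -f Dockerfile.fips.test -t minio/fips-test:latest .
docker run --rm minio/fips-test:latest ./minio --version
# grep exits non-zero (failing the check) when no FIPS symbols are present:
docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio' | grep -q FIPS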
12  .github/workflows/go-healing.yml  (vendored)

@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
@@ -21,9 +24,10 @@ jobs:
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
       - uses: actions/cache@v2
         with:
           path: |
@@ -37,12 +41,10 @@ jobs:
         env:
           CGO_ENABLED: 0
           GO111MODULE: on
-          MINIO_KMS_KES_CERT_FILE: /home/runner/work/minio/minio/.github/workflows/root.cert
-          MINIO_KMS_KES_KEY_FILE: /home/runner/work/minio/minio/.github/workflows/root.key
-          MINIO_KMS_KES_ENDPOINT: "https://play.min.io:7373"
-          MINIO_KMS_KES_KEY_NAME: "my-minio-key"
+          MINIO_KMS_SECRET_KEY: "my-minio-key:oyArl7zlPECEduNbB1KXgdzDn2Bdpvvw0l8VO51HQnY="
           MINIO_KMS_AUTO_ENCRYPTION: on
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make verify-healing
+          make verify-healing-inconsistent-versions
8  .github/workflows/go-lint.yml  (vendored)

@@ -11,19 +11,23 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.17.x, 1.18.x]
         os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
       - uses: actions/cache@v2
         if: matrix.os == 'ubuntu-latest'
         with:
8  .github/workflows/go.yml  (vendored)

@@ -11,19 +11,23 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }} - healing
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.17.x, 1.18.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
       - uses: actions/cache@v2
         with:
           path: |
36  .github/workflows/iam-integrations.yaml  (vendored)

@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   iam-matrix-test:
     name: "[Go=${{ matrix.go-version }}|ldap=${{ matrix.ldap }}|etcd=${{ matrix.etcd }}|openid=${{ matrix.openid }}]"
@@ -44,6 +47,14 @@ jobs:
           - "5556:5556"
         env:
           DEX_LDAP_SERVER: "openldap:389"
+      openid2:
+        image: quay.io/minio/dex
+        ports:
+          - "5557:5557"
+        env:
+          DEX_LDAP_SERVER: "openldap:389"
+          DEX_ISSUER: "http://127.0.0.1:5557/dex"
+          DEX_WEB_HTTP: "0.0.0.0:5557"
 
     strategy:
       # When ldap, etcd or openid vars are empty below, those external servers
@@ -65,9 +76,10 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
       - uses: actions/cache@v2
         with:
           path: |
@@ -85,6 +97,28 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-iam
+      - name: Test with multiple OpenID providers
+        if: matrix.openid == 'http://127.0.0.1:5556/dex'
+        env:
+          LDAP_TEST_SERVER: ${{ matrix.ldap }}
+          ETCD_SERVER: ${{ matrix.etcd }}
+          OPENID_TEST_SERVER: ${{ matrix.openid }}
+          OPENID_TEST_SERVER_2: "http://127.0.0.1:5557/dex"
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-iam
+      - name: Test with Access Management Plugin enabled
+        env:
+          LDAP_TEST_SERVER: ${{ matrix.ldap }}
+          ETCD_SERVER: ${{ matrix.etcd }}
+          OPENID_TEST_SERVER: ${{ matrix.openid }}
+          POLICY_PLUGIN_ENDPOINT: "http://127.0.0.1:8080"
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          go run docs/iam/access-manager-plugin.go &
+          make test-iam
       - name: Test LDAP for automatic site replication
         if: matrix.ldap == 'localhost:389'
         run: |
7  .github/workflows/markdown-lint.yaml  (vendored)

@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   lint:
     name: Lint all docs
@@ -22,4 +25,6 @@ jobs:
       - name: Lint all docs
         run: |
           npm install -g markdownlint-cli
-          markdownlint --fix '**/*.md' --disable MD013 MD040
+          markdownlint --fix '**/*.md' \
+            --config /home/runner/work/minio/minio/.github/markdown-lint-cfg.yaml \
+            --disable MD013 MD040
6  .github/workflows/replication.yaml  (vendored)

@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   replication-test:
     name: Replication Tests with Go ${{ matrix.go-version }}
@@ -22,9 +25,10 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
       - uses: actions/cache@v2
         with:
           path: |
7  .github/workflows/upgrade-ci-cd.yaml  (vendored)

@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
@@ -22,10 +25,10 @@ jobs:
 
     steps:
       - uses: actions/checkout@v1
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
-
+          check-latest: true
       - name: Start upgrade tests
         run: |
          make test-upgrade
11  .gitignore  (vendored)

@@ -9,8 +9,7 @@ site/
 /.idea/
 /Minio.iml
 **/access.log
-vendor/**/*.js
-vendor/**/*.json
+vendor/
 .DS_Store
 *.syso
 coverage.txt
@@ -32,4 +31,10 @@ hash-set
 minio.RELEASE*
 mc
 nancy
 inspects/*
+docs/debugging/s3-verify/s3-verify
+docs/debugging/xl-meta/xl-meta
+docs/debugging/s3-check-md5/s3-check-md5
+docs/debugging/hash-set/hash-set
+docs/debugging/healing-bin/healing-bin
+docs/debugging/inspect/inspect
@@ -25,6 +25,8 @@ linters:
     - varcheck
     - gocritic
     - gofumpt
+    - tenv
+    - durationcheck
 
 linters-settings:
   gofumpt:
14  Makefile

@@ -19,7 +19,7 @@ help: ## print this help
 
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
@@ -53,11 +53,12 @@ test-iam: build ## verify IAM (external IDP, etcd backends)
 	@echo "Running tests for IAM (external IDP, etcd backends)"
 	@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
 	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+	@GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
 
 test-replication: install ## verify multi site replication
 	@echo "Running tests for replicating three sites"
 	@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
+	@(env bash $(PWD)/docs/bucket/replication/setup_2site_existing_replication.sh)
 
 test-site-replication-ldap: install ## verify automatic site replication
 	@echo "Running tests for automatic site replication of IAM (with LDAP)"
@@ -73,15 +74,20 @@ test-site-replication-minio: install ## verify automatic site replication
 
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
+	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)
+
+verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
+	@echo "Verify resolving inconsistent versions build with race"
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
 
 build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
 	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
7  README.fips.md  (new file)

@@ -0,0 +1,7 @@
+# MinIO FIPS Builds
+
+MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
+
+MinIO FIPS executables are available at http://dl.min.io - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
+
+We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
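A hedged sketch of verifying that a downloaded `.fips` binary actually carries BoringCrypto symbols; the exact download path under dl.min.io is an assumption, and the `go tool nm` check mirrors the FIPS workflow above:

# Assumed URL layout; adjust to whatever dl.min.io actually publishes.
curl -sSfL -o minio.fips "https://dl.min.io/server/minio/release/linux-amd64/minio.fips"
go tool nm minio.fips | grep -q FIPS && echo "BoringCrypto (FIPS) symbols present"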
16  README.md

@@ -196,12 +196,6 @@ iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
 service iptables restart
 ```
 
-## Pre-existing data
-
-When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
-
-The above statement is also valid for all gateway backends.
-
 ## Test MinIO Connectivity
 
 ### Test using MinIO Console
@@ -242,16 +236,16 @@ Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all tr
 mc admin update <minio alias, e.g., myminio>
 ```
 
-- For deployments without external internet access (e.g. airgapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary let's say for example `/opt/bin/minio`, apply executable permissions `chmod +x /opt/bin/minio` and do `mc admin service restart alias/`.
+- For deployments without external internet access (e.g. airgapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary let's say for example `/opt/bin/minio`, apply executable permissions `chmod +x /opt/bin/minio` and proceed to perform `mc admin service restart alias/`.
 
-- For RPM/DEB installations, upgrade packages **parallelly** on all servers. Once upgraded, perform `systemctl restart minio` across all nodes in **parallel**. RPM/DEB based installations are usually automated using [`ansible`](https://github.com/minio/ansible-minio).
+- For installations using Systemd MinIO service, upgrade via RPM/DEB packages **parallelly** on all servers or replace the binary lets say `/opt/bin/minio` on all nodes, apply executable permissions `chmod +x /opt/bin/minio` and process to perform `mc admin service restart alias/`.
 
 ### Upgrade Checklist
 
 - Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
-- Read the release notes for the targeted MinIO release *before* performing any installation, there is no forced requirement to upgrade to latest releases every week. If it has a bug fix you are looking for then yes, else avoid actively upgrading a running production system.
-- Make sure MinIO process has write access to `/opt/bin` if you plan to use `mc admin update`. This is needed for MinIO to download the latest binary from <https://dl.min.io> and save it locally for upgrades.
-- `mc admin update` is not supported in kubernetes/container environments, container environments provide their own mechanisms for container updates.
+- Read the release notes for MinIO *before* performing any upgrade, there is no forced requirement to upgrade to latest releases upon every releases. Some releases may not be relevant to your setup, avoid upgrading production environments unnecessarily.
+- If you plan to use `mc admin update`, MinIO process must have write access to the parent directory where the binary is present on the host system.
+- `mc admin update` is not supported and should be avoided in kubernetes/container environments, please upgrade containers by upgrading relevant container images.
 - **We do not recommend upgrading one MinIO server at a time, the product is designed to support parallel upgrades please follow our recommended guidelines.**
 
 ## Explore Further
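The checklist's parallel-upgrade guidance, as a rough sketch for a systemd-managed deployment; `NODES` and the `/opt/bin/minio` path are illustrative assumptions, not part of the diff:

NODES="node1 node2 node3 node4"
for n in $NODES; do
  # Stage the new binary on every node in parallel.
  (scp ./minio "$n:/opt/bin/minio.new" && ssh "$n" 'chmod +x /opt/bin/minio.new && mv /opt/bin/minio.new /opt/bin/minio') &
done
wait
for n in $NODES; do
  ssh "$n" 'systemctl restart minio' &   # restart all nodes in parallel, not one at a time
done
wait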
BIN  buildscripts/cicd-corpus/disk2/bucket/testobj/xl.meta  (new file, binary file not shown)
BIN  buildscripts/cicd-corpus/disk3/bucket/testobj/xl.meta  (new file, binary file not shown)
BIN  buildscripts/cicd-corpus/disk4/bucket/testobj/xl.meta  (new file, binary file not shown)
BIN  buildscripts/cicd-corpus/disk5/bucket/testobj/xl.meta  (new file, binary file not shown)
@@ -24,14 +24,18 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"strconv"
 	"strings"
 	"time"
 )
 
 func genLDFlags(version string) string {
+	releaseTag, date := releaseTag(version)
+	copyrightYear := strconv.Itoa(date.Year())
 	ldflagsStr := "-s -w"
 	ldflagsStr += " -X github.com/minio/minio/cmd.Version=" + version
-	ldflagsStr += " -X github.com/minio/minio/cmd.ReleaseTag=" + releaseTag(version)
+	ldflagsStr += " -X github.com/minio/minio/cmd.CopyrightYear=" + copyrightYear
+	ldflagsStr += " -X github.com/minio/minio/cmd.ReleaseTag=" + releaseTag
 	ldflagsStr += " -X github.com/minio/minio/cmd.CommitID=" + commitID()
 	ldflagsStr += " -X github.com/minio/minio/cmd.ShortCommitID=" + commitID()[:12]
 	ldflagsStr += " -X github.com/minio/minio/cmd.GOPATH=" + os.Getenv("GOPATH")
@@ -40,7 +44,7 @@ func genLDFlags(version string) string {
 }
 
 // genReleaseTag prints release tag to the console for easy git tagging.
-func releaseTag(version string) string {
+func releaseTag(version string) (string, time.Time) {
 	relPrefix := "DEVELOPMENT"
 	if prefix := os.Getenv("MINIO_RELEASE"); prefix != "" {
 		relPrefix = prefix
@@ -53,14 +57,17 @@ func releaseTag(version string) string {
 
 	relTag := strings.Replace(version, " ", "-", -1)
 	relTag = strings.Replace(relTag, ":", "-", -1)
+	t, err := time.Parse("2006-01-02T15-04-05Z", relTag)
+	if err != nil {
+		panic(err)
+	}
 	relTag = strings.Replace(relTag, ",", "", -1)
 	relTag = relPrefix + "." + relTag
 
 	if relSuffix != "" {
 		relTag += "." + relSuffix
 	}
 
-	return relTag
+	return relTag, t
 }
 
 // commitID returns the abbreviated commit-id hash of the last commit.
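For context, this generator's output is meant to be fed straight into `go build`; a sketch of the wiring, assuming the file is the ldflags generator consumed by the Makefile's `$(LDFLAGS)` (the script path below is inferred, not shown in this diff):

# Sketch only: buildscripts/gen-ldflags.go is an assumed path.
LDFLAGS=$(go run buildscripts/gen-ldflags.go)
CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$LDFLAGS" -o ./minio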
@@ -2,6 +2,7 @@
 
 set -e
 
+export GORACE="history_size=7"
 ## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
 for d in $(go list ./... | grep -v dsync); do
     CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
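`history_size=7` enlarges the race detector's per-goroutine memory-access history (the default is 1), which reduces "failed to restore the stack" reports on long test runs. The equivalent single-package invocation:

GORACE="history_size=7" CGO_ENABLED=1 go test -v -race --timeout 100m ./cmd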
72  buildscripts/resolve-right-versions.sh  (new executable file)

@@ -0,0 +1,72 @@
+#!/bin/bash -e
+
+set -E
+set -o pipefail
+set -x
+
+WORK_DIR="$PWD/.verify-$RANDOM"
+MINIO_CONFIG_DIR="$WORK_DIR/.minio"
+MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
+
+if [ ! -x "$PWD/minio" ]; then
+	echo "minio executable binary not found in current directory"
+	exit 1
+fi
+
+function start_minio_5drive() {
+	start_port=$1
+
+	export MINIO_ROOT_USER=minio
+	export MINIO_ROOT_PASSWORD=minio123
+	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
+	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
+	export MINIO_CI_CD=1
+
+	MC_BUILD_DIR="mc-$RANDOM"
+	if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
+		echo "failed to download https://github.com/minio/mc"
+		purge "${MC_BUILD_DIR}"
+		exit 1
+	fi
+
+	(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
+
+	# remove mc source.
+	purge "${MC_BUILD_DIR}"
+
+	"${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"
+
+	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" > "${WORK_DIR}/server1.log" 2>&1 &
+	pid=$!
+	disown $pid
+	sleep 30
+
+	if ! ps -p ${pid} 1>&2 >/dev/null; then
+		echo "server1 log:"
+		cat "${WORK_DIR}/server1.log"
+		echo "FAILED"
+		purge "$WORK_DIR"
+		exit 1
+	fi
+
+	"${WORK_DIR}/mc" stat minio/bucket/testobj
+
+	pkill minio
+	sleep 3
+}
+
+function main() {
+	start_port=$(shuf -i 10000-65000 -n 1)
+
+	start_minio_5drive ${start_port}
+}
+
+function purge()
+{
+	rm -rf "$1"
+}
+
+( main "$@" )
+rv=$?
+purge "$WORK_DIR"
+exit "$rv"
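The script expects a prebuilt `./minio` in the working directory and starts it against the checked-in cicd-corpus drives; its intended invocation, per the Makefile's `verify-healing-inconsistent-versions` target above:

GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath -o ./minio
bash buildscripts/resolve-right-versions.sh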
@@ -18,15 +18,31 @@
 package cmd
 
 import (
+	"bytes"
+	"encoding/base64"
 	"encoding/json"
+	"encoding/xml"
+	"errors"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"net/http"
+	"strings"
 	"time"
 
 	"github.com/gorilla/mux"
 	jsoniter "github.com/json-iterator/go"
+	"github.com/klauspost/compress/zip"
+	"github.com/minio/kes"
 	"github.com/minio/madmin-go"
+	"github.com/minio/minio-go/v7/pkg/tags"
+	"github.com/minio/minio/internal/bucket/lifecycle"
+	objectlock "github.com/minio/minio/internal/bucket/object/lock"
+	"github.com/minio/minio/internal/bucket/versioning"
+	"github.com/minio/minio/internal/event"
+	"github.com/minio/minio/internal/kms"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/bucket/policy"
 	iampolicy "github.com/minio/pkg/iam/policy"
 )
@@ -120,7 +136,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
 		return
 	}
 
-	config, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
+	config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
@@ -145,7 +161,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	bucket := pathClean(vars["bucket"])
 	update := r.Form.Get("update") == "true"
 
-	if !globalIsErasure {
+	if globalIsGateway {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
 		return
 	}
@@ -274,7 +290,8 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
 	vars := mux.Vars(r)
 	bucket := pathClean(vars["bucket"])
 	arnType := vars["type"]
-	if !globalIsErasure {
+
+	if globalIsGateway {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
 		return
 	}
@@ -314,7 +331,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
 	bucket := pathClean(vars["bucket"])
 	arn := vars["arn"]
 
-	if !globalIsErasure {
+	if globalIsGateway {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
 		return
 	}
@@ -353,3 +370,688 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
|
||||
// Write success response.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
// ExportBucketMetadataHandler - exports all bucket metadata as a zipped file
|
||||
func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ExportBucketMetadata")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
bucket := pathClean(r.Form.Get("bucket"))
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportBucketMetadataAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
var (
|
||||
buckets []BucketInfo
|
||||
err error
|
||||
)
|
||||
if bucket != "" {
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
buckets = append(buckets, BucketInfo{Name: bucket})
|
||||
} else {
|
||||
buckets, err = objectAPI.ListBuckets(ctx)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize a zip writer which will provide a zipped content
|
||||
// of bucket metadata
|
||||
zipWriter := zip.NewWriter(w)
|
||||
defer zipWriter.Close()
|
||||
rawDataFn := func(r io.Reader, filename string, sz int) error {
|
||||
header, zerr := zip.FileInfoHeader(dummyFileInfo{
|
||||
name: filename,
|
||||
size: int64(sz),
|
||||
mode: 0o600,
|
||||
modTime: time.Now(),
|
||||
isDir: false,
|
||||
sys: nil,
|
||||
})
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
header.Method = zip.Deflate
|
||||
zwriter, zerr := zipWriter.CreateHeader(header)
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
if _, err := io.Copy(zwriter, r); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
cfgFiles := []string{
|
||||
bucketPolicyConfig,
|
||||
bucketNotificationConfig,
|
||||
bucketLifecycleConfig,
|
||||
bucketSSEConfig,
|
||||
bucketTaggingConfig,
|
||||
bucketQuotaConfigFile,
|
||||
objectLockConfig,
|
||||
bucketVersioningConfig,
|
||||
bucketReplicationConfig,
|
||||
bucketTargetsFile,
|
||||
}
|
||||
for _, bi := range buckets {
|
||||
for _, cfgFile := range cfgFiles {
|
||||
cfgPath := pathJoin(bi.Name, cfgFile)
|
||||
bucket := bi.Name
|
||||
switch cfgFile {
|
||||
case bucketNotificationConfig:
|
||||
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketLifecycleConfig:
|
||||
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketQuotaConfigFile:
|
||||
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketQuotaConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketSSEConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketSSEConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketTaggingConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketTaggingNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case objectLockConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketVersioningConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
// ignore empty versioning configs
|
||||
if config.Status != versioning.Enabled && config.Status != versioning.Suspended {
|
||||
continue
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketReplicationConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketTargetsFile:
|
||||
config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketRemoteTargetNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config
|
||||
// There are some caveats regarding the following:
|
||||
// 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here.
|
||||
// 2. Replication config - is omitted from import as remote target credentials are not available from exported data for security reasons.
|
||||
// 3. lifecycle config - if transition rules are present, tier name needs to have been defined.
|
||||
func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ImportBucketMetadata")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
bucket := pathClean(r.Form.Get("bucket"))
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ImportBucketMetadataAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
reader := bytes.NewReader(data)
|
||||
zr, err := zip.NewReader(reader, int64(len(data)))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
bucketMap := make(map[string]struct{}, 1)
|
||||
|
||||
// import object lock config if any - order of import matters here.
|
||||
for _, file := range zr.File {
|
||||
slc := strings.Split(file.Name, slashSeparator)
|
||||
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
b, fileName := slc[0], slc[1]
|
||||
if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip
|
||||
bucket = b
|
||||
}
|
||||
switch fileName {
|
||||
case objectLockConfig:
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
config, err := objectlock.ParseObjectLockConfig(reader)
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
apiErr.Description = err.Error()
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
opts := BucketOptions{
|
||||
LockEnabled: config.ObjectLockEnabled == "Enabled",
|
||||
}
|
||||
err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
bucketMap[bucket] = struct{}{}
|
||||
}
|
||||
|
||||
// Deny object locking configuration settings on existing buckets without object lock enabled.
|
||||
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Call site replication hook.
|
||||
//
|
||||
// We encode the xml bytes as base64 to ensure there are no encoding
|
||||
// errors.
|
||||
cfgStr := base64.StdEncoding.EncodeToString(configData)
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypeObjectLockConfig,
|
||||
Bucket: bucket,
|
||||
ObjectLockConfig: &cfgStr,
|
||||
}); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// import versioning metadata
|
||||
for _, file := range zr.File {
|
||||
slc := strings.Split(file.Name, slashSeparator)
|
||||
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
b, fileName := slc[0], slc[1]
|
||||
if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip
|
||||
bucket = b
|
||||
}
|
||||
switch fileName {
|
||||
case bucketVersioningConfig:
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
err = objectAPI.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
bucketMap[bucket] = struct{}{}
|
||||
}
|
||||
|
||||
if globalSiteReplicationSys.isEnabled() && v.Suspended() {
|
||||
writeErrorResponse(ctx, w, APIError{
|
||||
Code: "InvalidBucketState",
|
||||
Description: "Cluster replication is enabled for this site, so the versioning state cannot be changed.",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
}, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
|
||||
writeErrorResponse(ctx, w, APIError{
|
||||
Code: "InvalidBucketState",
|
||||
Description: "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
}, r.URL)
|
||||
return
|
||||
}
|
||||
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
|
||||
writeErrorResponse(ctx, w, APIError{
|
||||
Code: "InvalidBucketState",
|
||||
Description: "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
}, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(v)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range zr.File {
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
sz := file.FileInfo().Size()
|
||||
slc := strings.Split(file.Name, slashSeparator)
|
||||
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
b, fileName := slc[0], slc[1]
|
||||
if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip
|
||||
bucket = b
|
||||
}
|
||||
// create bucket if it does not exist yet.
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
err = objectAPI.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
bucketMap[bucket] = struct{}{}
|
||||
}
|
||||
switch fileName {
|
||||
case bucketNotificationConfig:
|
||||
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalNotificationSys.targetList)
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
if event.IsEventError(err) {
|
||||
apiErr = importError(ctx, err, file.Name, bucket)
|
||||
}
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
rulesMap := config.ToRulesMap()
|
||||
globalNotificationSys.AddRulesMap(bucket, rulesMap)
|
||||
case bucketPolicyConfig:
|
||||
// Error out if Content-Length is beyond allowed size.
|
||||
if sz > maxBucketPolicySize {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPolicyTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Version in policy must not be empty
|
||||
if bucketPolicy.Version == "" {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := json.Marshal(bucketPolicy)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
// Call site replication hook.
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypePolicy,
|
||||
Bucket: bucket,
|
||||
Policy: bucketPolicyBytes,
|
||||
}); err != nil {
|
||||
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
        case bucketLifecycleConfig:
            bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz))
            if err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            // Validate the received bucket lifecycle document
            if err = bucketLifecycle.Validate(); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            // Validate the transition storage ARNs
            if err = validateTransitionTier(bucketLifecycle); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            configData, err := xml.Marshal(bucketLifecycle)
            if err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            if err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }
        case bucketSSEConfig:
            // Parse bucket encryption xml
            encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize))
            if err != nil {
                apiErr := APIError{
                    Code:           "MalformedXML",
                    Description:    fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
                    HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
                }
                writeErrorResponse(ctx, w, apiErr, r.URL)
                return
            }

            // Return error if KMS is not initialized
            if GlobalKMS == nil {
                writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
                return
            }
            kmsKey := encConfig.KeyID()
            if kmsKey != "" {
                kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
                _, err := GlobalKMS.GenerateKey(kmsKey, kmsContext)
                if err != nil {
                    if errors.Is(err, kes.ErrKeyNotFound) {
                        writeErrorResponse(ctx, w, importError(ctx, errKMSKeyNotFound, file.Name, bucket), r.URL)
                        return
                    }
                    writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                    return
                }
            }

            configData, err := xml.Marshal(encConfig)
            if err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            // Store the bucket encryption configuration in the object layer
            if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            // Call site replication hook.
            //
            // We encode the xml bytes as base64 to ensure there are no encoding
            // errors.
            cfgStr := base64.StdEncoding.EncodeToString(configData)
            if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
                Type:      madmin.SRBucketMetaTypeSSEConfig,
                Bucket:    bucket,
                SSEConfig: &cfgStr,
            }); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

        case bucketTaggingConfig:
            tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz))
            if err != nil {
                apiErr := errorCodes.ToAPIErrWithErr(ErrMalformedXML, fmt.Errorf("error importing %s with %w", file.Name, err))
                writeErrorResponse(ctx, w, apiErr, r.URL)
                return
            }

            configData, err := xml.Marshal(tags)
            if err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }
            // Call site replication hook.
            //
            // We encode the xml bytes as base64 to ensure there are no encoding
            // errors.
            cfgStr := base64.StdEncoding.EncodeToString(configData)
            if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
                Type:   madmin.SRBucketMetaTypeTags,
                Bucket: bucket,
                Tags:   &cfgStr,
            }); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }
        case bucketQuotaConfigFile:
            data, err := ioutil.ReadAll(reader)
            if err != nil {
                writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
                return
            }

            quotaConfig, err := parseBucketQuota(bucket, data)
            if err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            if quotaConfig.Type == "fifo" {
                writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
                return
            }

            if err = globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }

            bucketMeta := madmin.SRBucketMeta{
                Type:   madmin.SRBucketMetaTypeQuotaConfig,
                Bucket: bucket,
                Quota:  data,
            }
            if quotaConfig.Quota == 0 {
                bucketMeta.Quota = nil
            }

            // Call site replication hook.
            if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
                writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
                return
            }
        }
    }
}
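
For reference, the bucketQuotaConfigFile case above hands the raw bytes to parseBucketQuota. A sketch of producing such a document — field names follow the madmin package's BucketQuota type, which is an assumption here rather than something this diff shows:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/minio/madmin-go"
)

func main() {
    // Hypothetical quota document for the import zip. Note the handler above
    // treats a Quota of 0 as "no quota" (bucketMeta.Quota is set to nil).
    q := madmin.BucketQuota{
        Quota: 10 << 30, // 10 GiB
        Type:  madmin.HardQuota,
    }
    data, _ := json.Marshal(q)
    fmt.Println(string(data)) // e.g. {"quota":10737418240,"quotatype":"hard"}
}

The SSE and tagging cases both base64-encode the marshalled XML before passing it to the site-replication hook, as the inline comments note. A minimal round-trip sketch of that encoding step:

package main

import (
    "bytes"
    "encoding/base64"
    "fmt"
)

func main() {
    configData := []byte("<Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>")

    // Encode the raw XML so it survives transport between sites intact.
    cfgStr := base64.StdEncoding.EncodeToString(configData)

    // The receiving peer recovers the original bytes.
    decoded, err := base64.StdEncoding.DecodeString(cfgStr)
    if err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(decoded, configData)) // true
}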

@@ -20,6 +20,7 @@ package cmd

import (
    "context"
    "errors"
    "fmt"
    "net/http"

    "github.com/minio/kes"

@@ -96,6 +97,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
    }
    default:
        switch {
        case errors.Is(err, errTooManyPolicies):
            apiErr = APIError{
                Code:           "XMinioAdminInvalidRequest",
                Description:    err.Error(),
                HTTPStatusCode: http.StatusBadRequest,
            }
        case errors.Is(err, errDecommissionAlreadyRunning):
            apiErr = APIError{
                Code: "XMinioDecommissionNotAllowed",

@@ -180,7 +187,7 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
            apiErr = APIError{
                Code:           "XMinioAdminTierBackendInUse",
                Description:    err.Error(),
                HTTPStatusCode: http.StatusConflict,
                HTTPStatusCode: http.StatusBadRequest,
            }
        case errors.Is(err, errTierBackendNotEmpty):
            apiErr = APIError{

@@ -217,3 +224,19 @@ func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
        return toAPIErrorCode(ctx, err)
    }
}

// wraps export error for more context
func exportError(ctx context.Context, err error, fname, entity string) APIError {
    if entity == "" {
        return toAPIError(ctx, fmt.Errorf("error exporting %s with: %w", fname, err))
    }
    return toAPIError(ctx, fmt.Errorf("error exporting %s from %s with: %w", entity, fname, err))
}

// wraps import error for more context
func importError(ctx context.Context, err error, fname, entity string) APIError {
    if entity == "" {
        return toAPIError(ctx, fmt.Errorf("error importing %s with: %w", fname, err))
    }
    return toAPIError(ctx, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
}
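
Because both wrappers wrap with %w, the root cause remains inspectable after the context is added. A small self-contained sketch (wrapImport mirrors the message shape of importError above but skips the APIError conversion):

package main

import (
    "errors"
    "fmt"
)

var errKMSKeyNotFound = errors.New("KMS key not found")

// wrapImport adds file/bucket context while preserving the error chain.
func wrapImport(err error, fname, entity string) error {
    return fmt.Errorf("error importing %s from %s with: %w", entity, fname, err)
}

func main() {
    err := wrapImport(errKMSKeyNotFound, "bucket-metadata.zip", "mybucket")
    fmt.Println(err)
    fmt.Println(errors.Is(err, errKMSKeyNotFound)) // true - %w keeps the chain
}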

@@ -33,7 +33,8 @@ import (
    "github.com/minio/minio/internal/config/etcd"
    xldap "github.com/minio/minio/internal/config/identity/ldap"
    "github.com/minio/minio/internal/config/identity/openid"
    "github.com/minio/minio/internal/config/policy/opa"
    idplugin "github.com/minio/minio/internal/config/identity/plugin"
    polplugin "github.com/minio/minio/internal/config/policy/plugin"
    "github.com/minio/minio/internal/config/storageclass"
    "github.com/minio/minio/internal/logger"
    iampolicy "github.com/minio/pkg/iam/policy"

@@ -97,13 +98,14 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
}

func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
    r *http.Request, w http.ResponseWriter) {
    r *http.Request, w http.ResponseWriter,
) {
    // Apply dynamic values.
    if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    globalNotificationSys.SignalService(serviceReloadDynamic)
    globalNotificationSys.SignalConfigReload(subSys)
    // Tell the client that dynamic config was applied.
    w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}

@@ -435,14 +437,16 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
        off = !cache.Enabled(kv)
    case config.StorageClassSubSys:
        off = !storageclass.Enabled(kv)
    case config.PolicyOPASubSys:
        off = !opa.Enabled(kv)
    case config.PolicyPluginSubSys:
        off = !polplugin.Enabled(kv)
    case config.IdentityOpenIDSubSys:
        off = !openid.Enabled(kv)
    case config.IdentityLDAPSubSys:
        off = !xldap.Enabled(kv)
    case config.IdentityTLSSubSys:
        off = !globalSTSTLSConfig.Enabled
    case config.IdentityPluginSubSys:
        off = !idplugin.Enabled(kv)
    }
    if off {
        s.WriteString(config.KvComment)

@@ -58,6 +58,16 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
        return
    }

    if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
        for nodeIdx, proxyEp := range globalProxyEndpoints {
            if proxyEp.Endpoint.Host == ep.Host {
                if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
                    return
                }
            }
        }
    }

    if err := pools.Decommission(r.Context(), idx); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return

@@ -96,6 +106,16 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
        return
    }

    if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
        for nodeIdx, proxyEp := range globalProxyEndpoints {
            if proxyEp.Endpoint.Host == ep.Host {
                if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
                    return
                }
            }
        }
    }

    if err := pools.DecommissionCancel(ctx, idx); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
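
Both decommission handlers repeat the same lookup: if the pool's first endpoint is not local, find the proxy endpoint whose host serves it and forward the request to that node. A hypothetical extraction of the lookup, with stand-in types rather than the server's real Endpoint/ProxyEndpoint definitions:

package main

import "fmt"

type endpoint struct {
    Host    string
    IsLocal bool
}

type proxyEndpoint struct {
    Endpoint endpoint
}

// nodeIndexForHost returns the index of the proxy endpoint serving host,
// or -1 when no peer matches and the request must be handled locally.
func nodeIndexForHost(proxyEps []proxyEndpoint, host string) int {
    for idx, pe := range proxyEps {
        if pe.Endpoint.Host == host {
            return idx
        }
    }
    return -1
}

func main() {
    eps := []proxyEndpoint{{endpoint{Host: "node1:9000"}}, {endpoint{Host: "node2:9000"}}}
    fmt.Println(nodeIndexForHost(eps, "node2:9000")) // 1
}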

@@ -115,10 +115,12 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
    case madmin.MakeWithVersioningBktOp:
        _, isLockEnabled := r.Form["lockEnabled"]
        _, isVersioningEnabled := r.Form["versioningEnabled"]
        _, isForceCreate := r.Form["forceCreate"]
        opts := BucketOptions{
            Location:          r.Form.Get("location"),
            LockEnabled:       isLockEnabled,
            VersioningEnabled: isVersioningEnabled,
            ForceCreate:       isForceCreate,
        }
        err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
    case madmin.ConfigureReplBktOp:

@@ -239,6 +241,8 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
                return
            }
        }
    case madmin.SRBucketMetaTypeVersionConfig:
        err = globalSiteReplicationSys.PeerBucketVersioningHandler(ctx, item.Bucket, item.Versioning)
    case madmin.SRBucketMetaTypeTags:
        err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags)
    case madmin.SRBucketMetaTypeObjectLockConfig:

@@ -26,6 +26,7 @@ package cmd
import (
    "context"
    "fmt"
    "runtime"
    "sync"
    "testing"
    "time"

@@ -41,11 +42,15 @@ func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
}

func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
    if runtime.GOOS == globalWindowsOSName {
        t.Skip("windows is clunky")
    }

    baseTestCases := []TestSuiteCommon{
        // Init and run test on FS backend with signature v4.
        {serverType: "FS", signer: signerV4},
        // Init and run test on FS backend, with tls enabled.
        {serverType: "FS", signer: signerV4, secure: true},
        // Init and run test on ErasureSD backend with signature v4.
        {serverType: "ErasureSD", signer: signerV4},
        // Init and run test on ErasureSD backend, with tls enabled.
        {serverType: "ErasureSD", signer: signerV4, secure: true},
        // Init and run test on Erasure backend.
        {serverType: "Erasure", signer: signerV4},
        // Init and run test on ErasureSet backend.

@@ -73,7 +78,7 @@ func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
}

func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
    defer cancel()

    bucket := getRandomBucketName()

@@ -90,7 +90,7 @@ func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request

    password := cred.SecretKey

    allCredentials, err := globalIAMSys.ListBucketUsers(bucket)
    allCredentials, err := globalIAMSys.ListBucketUsers(ctx, bucket)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return

@@ -124,12 +124,24 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {

    password := cred.SecretKey

    allCredentials, err := globalIAMSys.ListUsers()
    allCredentials, err := globalIAMSys.ListUsers(ctx)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    // Add ldap users which have mapped policies if in LDAP mode
    // FIXME(vadmeste): move this to policy info in the future
    ldapUsers, err := globalIAMSys.ListLDAPUsers()
    if err != nil && err != errIAMActionNotAllowed {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    for k, v := range ldapUsers {
        allCredentials[k] = v
    }

    // Marshal the response
    data, err := json.Marshal(allCredentials)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -229,6 +241,15 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
    if updReq.IsRemove {
        err = globalIAMSys.RemoveUsersFromGroup(ctx, updReq.Group, updReq.Members)
    } else {
        // Check if group already exists
        if _, gerr := globalIAMSys.GetGroupDescription(updReq.Group); gerr != nil {
            // If group does not exist, then check if the group has beginning and end space characters
            // we will reject such group names.
            if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(updReq.Group) {
                writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
                return
            }
        }
        err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
    }
    if err != nil {

@@ -430,6 +451,12 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
        return
    }

    // Check if accessKey has beginning and end space characters, this only applies to new users.
    if !exists && hasSpaceBE(accessKey) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
        return
    }

    checkDenyOnly := false
    if accessKey == cred.AccessKey {
        // Check that there is no explicit deny - otherwise it's allowed

@@ -521,6 +548,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
        return
    }

    // service account access key cannot have space characters beginning and end of the string.
    if hasSpaceBE(createReq.AccessKey) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
        return
    }

    var (
        targetUser   string
        targetGroups []string

@@ -605,7 +638,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
        ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
        IsOwner:         owner,
        Claims:          claims,
        DenyOnly:        true,
    }) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
        return

@@ -645,29 +677,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
        return
    }

    // Call hook for cluster-replication if the service account is not for a
    // root user.
    if newCred.ParentUser != globalActiveCred.AccessKey {
        err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
            Type: madmin.SRIAMItemSvcAcc,
            SvcAccChange: &madmin.SRSvcAccChange{
                Create: &madmin.SRSvcAccCreate{
                    Parent:        newCred.ParentUser,
                    AccessKey:     newCred.AccessKey,
                    SecretKey:     newCred.SecretKey,
                    Groups:        newCred.Groups,
                    Claims:        opts.claims,
                    SessionPolicy: createReq.Policy,
                    Status:        auth.AccountOn,
                },
            },
        })
        if err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
    }

    createResp := madmin.AddServiceAccountResp{
        Credentials: madmin.Credentials{
            AccessKey: newCred.AccessKey,

@@ -688,6 +697,29 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
    }

    writeSuccessResponseJSON(w, encryptedData)

    // Call hook for cluster-replication if the service account is not for a
    // root user.
    if newCred.ParentUser != globalActiveCred.AccessKey {
        err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
            Type: madmin.SRIAMItemSvcAcc,
            SvcAccChange: &madmin.SRSvcAccChange{
                Create: &madmin.SRSvcAccCreate{
                    Parent:        newCred.ParentUser,
                    AccessKey:     newCred.AccessKey,
                    SecretKey:     newCred.SecretKey,
                    Groups:        newCred.Groups,
                    Claims:        opts.claims,
                    SessionPolicy: createReq.Policy,
                    Status:        auth.AccountOn,
                },
            },
        })
        if err != nil {
            logger.LogIf(ctx, err)
            return
        }
    }
}

// UpdateServiceAccount - POST /minio/admin/v3/update-service-account
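
The AddServiceAccount change above moves the site-replication hook from before the response to after writeSuccessResponseJSON, and downgrades a hook failure from an error response to logger.LogIf: the client receives its credentials even when replication lags. A generic sketch of that respond-then-notify ordering, with illustrative names rather than MinIO's:

package main

import (
    "log"
    "net/http"
    "net/http/httptest"
)

// respondThenNotify writes the success payload first; the best-effort hook
// runs afterwards and its failure is logged rather than surfaced.
func respondThenNotify(w http.ResponseWriter, payload []byte, hook func() error) {
    w.WriteHeader(http.StatusOK)
    w.Write(payload) // the caller already has its response at this point
    if err := hook(); err != nil {
        log.Printf("replication hook failed: %v", err)
    }
}

func main() {
    rec := httptest.NewRecorder()
    respondThenNotify(rec, []byte(`{"ok":true}`), func() error { return nil })
    log.Println(rec.Code) // 200
}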

@@ -982,11 +1014,9 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
        return
    }

    svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // We do not care if service account is readable or not at this point,
    // since this is a delete call we shall allow it to be deleted if possible.
    svcAccount, _, _ := globalIAMSys.GetServiceAccount(ctx, serviceAccount)

    adminPrivilege := globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName: cred.AccessKey,

@@ -1001,7 +1031,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
    if cred.ParentUser != "" {
        parentUser = cred.ParentUser
    }
    if parentUser != svcAccount.ParentUser {
    if svcAccount.ParentUser != "" && parentUser != svcAccount.ParentUser {
        // The service account belongs to another user but return not
        // found error to mitigate brute force attacks. or the
        // serviceAccount doesn't exist.

@@ -1010,23 +1040,21 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
        }
    }

    err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount, true)
    if err != nil {
    if err := globalIAMSys.DeleteServiceAccount(ctx, serviceAccount, true); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    // Call site replication hook - non-root user accounts are replicated.
    if svcAccount.ParentUser != globalActiveCred.AccessKey {
        err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
    if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey {
        if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
            Type: madmin.SRIAMItemSvcAcc,
            SvcAccChange: &madmin.SRSvcAccChange{
                Delete: &madmin.SRSvcAccDelete{
                    AccessKey: serviceAccount,
                },
            },
        })
        if err != nil {
        }); err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }

@@ -1077,6 +1105,19 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
        rd = true
    }

    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName:     cred.AccessKey,
        Groups:          cred.Groups,
        Action:          iampolicy.GetBucketLocationAction,
        BucketName:      bucketName,
        ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
        IsOwner:         owner,
        ObjectName:      "",
        Claims:          claims,
    }) {
        rd = true
    }

    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName: cred.AccessKey,
        Groups:      cred.Groups,

@@ -1171,8 +1212,8 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

        lcfg, _ := globalBucketObjectLockSys.Get(bucket.Name)
        quota, _ := globalBucketQuotaSys.Get(ctx, bucket.Name)
        rcfg, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name)
        tcfg, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name)
        rcfg, _, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name)
        tcfg, _, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name)

        acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{
            Name: bucket.Name,

@@ -1373,6 +1414,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
    vars := mux.Vars(r)
    policyName := vars["name"]

    // Reject policy names that have space characters at the beginning or end.
    if hasSpaceBE(policyName) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
        return
    }

    // Error out if Content-Length is missing.
    if r.ContentLength <= 0 {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)

@@ -28,6 +28,7 @@ import (
    "net/http"
    "net/url"
    "os"
    "runtime"
    "strings"
    "testing"
    "time"

@@ -50,6 +51,8 @@ const (
type TestSuiteIAM struct {
    TestSuiteCommon

    ServerTypeDescription string

    // Flag to turn on tests for etcd backend IAM
    withEtcdBackend bool

@@ -59,7 +62,15 @@ type TestSuiteIAM struct {
}

func newTestSuiteIAM(c TestSuiteCommon, withEtcdBackend bool) *TestSuiteIAM {
    return &TestSuiteIAM{TestSuiteCommon: c, withEtcdBackend: withEtcdBackend}
    etcdStr := ""
    if withEtcdBackend {
        etcdStr = " (with etcd backend)"
    }
    return &TestSuiteIAM{
        TestSuiteCommon:       c,
        ServerTypeDescription: fmt.Sprintf("%s%s", c.serverType, etcdStr),
        withEtcdBackend:       withEtcdBackend,
    }
}

func (s *TestSuiteIAM) iamSetup(c *check) {

@@ -87,6 +98,29 @@ func (s *TestSuiteIAM) iamSetup(c *check) {
    }
}

// List of all IAM test suites (i.e. test server configuration combinations)
// common to tests.
var iamTestSuites = func() []*TestSuiteIAM {
    baseTestCases := []TestSuiteCommon{
        // Init and run test on ErasureSD backend with signature v4.
        {serverType: "ErasureSD", signer: signerV4},
        // Init and run test on ErasureSD backend, with tls enabled.
        {serverType: "ErasureSD", signer: signerV4, secure: true},
        // Init and run test on Erasure backend.
        {serverType: "Erasure", signer: signerV4},
        // Init and run test on ErasureSet backend.
        {serverType: "ErasureSet", signer: signerV4},
    }
    testCases := []*TestSuiteIAM{}
    for _, bt := range baseTestCases {
        testCases = append(testCases,
            newTestSuiteIAM(bt, false),
            newTestSuiteIAM(bt, true),
        )
    }
    return testCases
}()

const (
    EnvTestEtcdBackend = "ETCD_SERVER"
)

@@ -156,30 +190,12 @@ func (s *TestSuiteIAM) getUserClient(c *check, accessKey, secretKey, sessionToke
}

func TestIAMInternalIDPServerSuite(t *testing.T) {
    baseTestCases := []TestSuiteCommon{
        // Init and run test on FS backend with signature v4.
        {serverType: "FS", signer: signerV4},
        // Init and run test on FS backend, with tls enabled.
        {serverType: "FS", signer: signerV4, secure: true},
        // Init and run test on Erasure backend.
        {serverType: "Erasure", signer: signerV4},
        // Init and run test on ErasureSet backend.
        {serverType: "ErasureSet", signer: signerV4},
    if runtime.GOOS == globalWindowsOSName {
        t.Skip("windows is clunky disable these tests")
    }
    testCases := []*TestSuiteIAM{}
    for _, bt := range baseTestCases {
        testCases = append(testCases,
            newTestSuiteIAM(bt, false),
            newTestSuiteIAM(bt, true),
        )
    }
    for i, testCase := range testCases {
        etcdStr := ""
        if testCase.withEtcdBackend {
            etcdStr = " (with etcd backend)"
        }
    for i, testCase := range iamTestSuites {
        t.Run(
            fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
            fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
            func(t *testing.T) {
                suite := testCase
                c := &check{t, testCase.serverType}

@@ -226,6 +242,7 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
    if err != nil {
        c.Fatalf("unable to set policy: %v", err)
    }

    client := s.getUserClient(c, accessKey, secretKey, "")
    err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
    if err != nil {

@@ -890,6 +907,9 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {

    // 5. Check that service account can be deleted.
    c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)

    // 6. Check that service account cannot be created for some other user.
    c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}

func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {

@@ -960,6 +980,166 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
    c.assertSvcAccDeletion(ctx, s, s.adm, accessKey, bucket)
}

func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
    ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
    defer cancel()

    pluginEndpoint := os.Getenv("POLICY_PLUGIN_ENDPOINT")
    if pluginEndpoint == "" {
        c.Skip("POLICY_PLUGIN_ENDPOINT not given - skipping.")
    }

    configCmds := []string{
        "policy_plugin",
        "url=" + pluginEndpoint,
    }

    _, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
    if err != nil {
        c.Fatalf("unable to setup access management plugin for tests: %v", err)
    }

    s.RestartIAMSuite(c)
}

// TestIAM_AMPInternalIDPServerSuite - tests for access management plugin
func TestIAM_AMPInternalIDPServerSuite(t *testing.T) {
    for i, testCase := range iamTestSuites {
        t.Run(
            fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
            func(t *testing.T) {
                suite := testCase
                c := &check{t, testCase.serverType}

                suite.SetUpSuite(c)
                defer suite.TearDownSuite(c)

                suite.SetUpAccMgmtPlugin(c)

                suite.TestAccMgmtPlugin(c)
            },
        )
    }
}

// TestAccMgmtPlugin - this test assumes that the access-management-plugin is
// the same as the example in `docs/iam/access-manager-plugin.go` -
// specifically, it denies only `s3:Put*` operations on non-root accounts.
func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
    ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
    defer cancel()

    // 0. Check that owner is able to make-bucket.
    bucket := getRandomBucketName()
    err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
    if err != nil {
        c.Fatalf("bucket create error: %v", err)
    }

    // 1. Create a user.
    accessKey, secretKey := mustGenerateCredentials(c)
    err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
    if err != nil {
        c.Fatalf("Unable to set user: %v", err)
    }

    // 2. Check new user appears in listing
    usersMap, err := s.adm.ListUsers(ctx)
    if err != nil {
        c.Fatalf("error listing: %v", err)
    }
    v, ok := usersMap[accessKey]
    if !ok {
        c.Fatalf("user not listed: %s", accessKey)
    }
    c.Assert(v.Status, madmin.AccountEnabled)

    // 3. Check that user is able to make a bucket.
    client := s.getUserClient(c, accessKey, secretKey, "")
    err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
    if err != nil {
        c.Fatalf("user could not create bucket: %v", err)
    }

    // 3.1 check user has access to bucket
    c.mustListObjects(ctx, client, bucket)

    // 3.2 check that user cannot upload an object.
    _, err = client.PutObject(ctx, bucket, "objectName", bytes.NewBuffer([]byte("some content")), 12, minio.PutObjectOptions{})
    if err == nil {
        c.Fatalf("user was able to upload unexpectedly")
    }

    // Create an madmin client with user creds
    userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
        Creds:  cr.NewStaticV4(accessKey, secretKey, ""),
        Secure: s.secure,
    })
    if err != nil {
        c.Fatalf("Err creating user admin client: %v", err)
    }
    userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)

    // Create svc acc
    cr := c.mustCreateSvcAccount(ctx, accessKey, userAdmClient)

    // 1. Check that svc account appears in listing
    c.assertSvcAccAppearsInListing(ctx, userAdmClient, accessKey, cr.AccessKey)

    // 2. Check that svc account info can be queried
    c.assertSvcAccInfoQueryable(ctx, userAdmClient, accessKey, cr.AccessKey, false)

    // 3. Check S3 access
    c.assertSvcAccS3Access(ctx, s, cr, bucket)

    // Check that session policies do not apply - as policy enforcement is
    // delegated to plugin.
    {
        svcAK, svcSK := mustGenerateCredentials(c)

        // This policy does not allow listing objects.
        policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
        cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
            Policy:     policyBytes,
            TargetUser: accessKey,
            AccessKey:  svcAK,
            SecretKey:  svcSK,
        })
        if err != nil {
            c.Fatalf("Unable to create svc acc: %v", err)
        }
        svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
        // Though the attached policy does not allow listing, it will be
        // ignored because the plugin allows it.
        c.mustListObjects(ctx, svcClient, bucket)
    }

    // 4. Check that service account's secret key and account status can be
    // updated.
    c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, accessKey, bucket)

    // 5. Check that service account can be deleted.
    c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)

    // 6. Check that service account **can** be created for some other user.
    // This is possible because of the policy enforced in the plugin.
    c.mustCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}

func (c *check) mustCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) madmin.Credentials {
    randUser := mustGetUUID()
    randPass := mustGetUUID()

@@ -1013,7 +1193,7 @@ func (c *check) mustNotListObjects(ctx context.Context, client *minio.Client, bu
    res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
    v, ok := <-res
    if !ok || v.Err == nil {
        c.Fatalf("user was able to list unexpectedly!")
        c.Fatalf("user was able to list unexpectedly! on %s", bucket)
    }
}

@@ -1021,11 +1201,20 @@ func (c *check) mustListObjects(ctx context.Context, client *minio.Client, bucke
    res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
    v, ok := <-res
    if ok && v.Err != nil {
        msg := fmt.Sprintf("user was unable to list: %v", v.Err)
        c.Fatalf(msg)
        c.Fatalf("user was unable to list: %v", v.Err)
    }
}

func (c *check) mustNotUpload(ctx context.Context, client *minio.Client, bucket string) {
    _, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
    if e, ok := err.(minio.ErrorResponse); ok {
        if e.Code == "AccessDenied" {
            return
        }
    }
    c.Fatalf("upload did not get an AccessDenied error - got %#v instead", err)
}

func (c *check) assertSvcAccS3Access(ctx context.Context, s *TestSuiteIAM, cr madmin.Credentials, bucket string) {
    svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
    c.mustListObjects(ctx, svcClient, bucket)

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//

@@ -18,15 +18,16 @@
package cmd

import (
    "bytes"
    "context"
    crand "crypto/rand"
    "crypto/subtle"
    "crypto/tls"
    "encoding/json"
    "errors"
    "fmt"
    "hash/crc32"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/url"

@@ -37,7 +38,6 @@ import (
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/dustin/go-humanize"

@@ -56,7 +56,8 @@ import (
)

const (
    maxEConfigJSONSize = 262272
    maxEConfigJSONSize        = 262272
    kubernetesVersionEndpoint = "https://kubernetes.default.svc/version"
)

// Only valid query params for mgmt admin APIs.

@@ -273,6 +274,8 @@ type ServerConnStats struct {
    Throughput       uint64 `json:"throughput,omitempty"`
    S3InputBytes     uint64 `json:"transferredS3"`
    S3OutputBytes    uint64 `json:"receivedS3"`
    AdminInputBytes  uint64 `json:"transferredAdmin"`
    AdminOutputBytes uint64 `json:"receivedAdmin"`
}

// ServerHTTPAPIStats holds total number of HTTP operations from/to the server,

@@ -289,6 +292,8 @@ type ServerHTTPStats struct {
    CurrentS3Requests   ServerHTTPAPIStats `json:"currentS3Requests"`
    TotalS3Requests     ServerHTTPAPIStats `json:"totalS3Requests"`
    TotalS3Errors       ServerHTTPAPIStats `json:"totalS3Errors"`
    TotalS35xxErrors    ServerHTTPAPIStats `json:"totalS35xxErrors"`
    TotalS34xxErrors    ServerHTTPAPIStats `json:"totalS34xxErrors"`
    TotalS3Canceled     ServerHTTPAPIStats `json:"totalS3Canceled"`
    TotalS3RejectedAuth uint64             `json:"totalS3RejectedAuth"`
    TotalS3RejectedTime uint64             `json:"totalS3RejectedTime"`

@@ -500,7 +505,7 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
}

// StartProfilingResult contains the status of the starting
// profiling action in a given server
// profiling action in a given server - deprecated API
type StartProfilingResult struct {
    NodeName string `json:"nodeName"`
    Success  bool   `json:"success"`

@@ -594,6 +599,83 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
    writeSuccessResponseJSON(w, startProfilingResultInBytes)
}

// ProfileHandler - POST /minio/admin/v3/profile/?profilerType={profilerType}
// ----------
// Enable server profiling
func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "Profile")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Validate request signature.
    _, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
    if adminAPIErr != ErrNone {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
        return
    }

    if globalNotificationSys == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }
    profileStr := r.Form.Get("profilerType")
    profiles := strings.Split(profileStr, ",")
    duration := time.Minute
    if dstr := r.Form.Get("duration"); dstr != "" {
        var err error
        duration, err = time.ParseDuration(dstr)
        if err != nil {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
            return
        }
    }
    // read request body
    io.CopyN(ioutil.Discard, r.Body, 1)

    globalProfilerMu.Lock()

    if globalProfiler == nil {
        globalProfiler = make(map[string]minioProfiler, 10)
    }

    // Stop profiler of all types if already running
    for k, v := range globalProfiler {
        v.Stop()
        delete(globalProfiler, k)
    }

    // Start profiling on remote servers.
    for _, profiler := range profiles {
        globalNotificationSys.StartProfiling(profiler)

        // Start profiling locally as well.
        prof, err := startProfiler(profiler)
        if err == nil {
            globalProfiler[profiler] = prof
        }
    }
    globalProfilerMu.Unlock()

    timer := time.NewTimer(duration)
    defer timer.Stop()
    for {
        select {
        case <-ctx.Done():
            for k, v := range globalProfiler {
                v.Stop()
                delete(globalProfiler, k)
            }
            return
        case <-timer.C:
            if !globalNotificationSys.DownloadProfilingData(ctx, w) {
                writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
                return
            }
            return
        }
    }
}

// dummyFileInfo represents a dummy representation of a profile data file
// present only in memory, it helps to generate the zip stream.
type dummyFileInfo struct {

@@ -614,7 +696,7 @@ func (f dummyFileInfo) Sys() interface{} { return f.sys }

// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------
// Download profiling information of all nodes in a zip format
// Download profiling information of all nodes in a zip format - deprecated API
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DownloadProfiling")

@@ -726,8 +808,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    // Check if this setup has an erasure coded backend.
    if !globalIsErasure {
        if globalIsGateway {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
            return
        }

@@ -919,7 +1000,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
    }

    // Check if this setup has an erasure coded backend.
    if !globalIsErasure {
        if globalIsGateway {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
            return
        }

@@ -999,7 +1080,7 @@ func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.
        return
    }

    if !globalIsErasure {
        if globalIsGateway {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
            return
        }

@@ -1029,38 +1110,29 @@ func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.
        duration = time.Second * 10
    }

    // ignores any errors here.
    storageInfo, _ := objectAPI.StorageInfo(ctx)
    capacityNeeded := uint64(concurrent * size)
    capacity := uint64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo))

    if capacity < capacityNeeded {
    sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)
    if !sufficientCapacity {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
            Code: "XMinioSpeedtestInsufficientCapacity",
            Message: fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
                humanize.IBytes(capacityNeeded), humanize.IBytes(capacity)),
            Code:       "XMinioSpeedtestInsufficientCapacity",
            Message:    capacityErrMsg,
            StatusCode: http.StatusInsufficientStorage,
        }), r.URL)
        return
    }

    // Verify if we can employ autotune without running out of capacity,
    // if we do run out of capacity, make sure to turn-off autotuning
    // in such situations.
    newConcurrent := concurrent + (concurrent+1)/2
    autoTunedCapacityNeeded := uint64(newConcurrent * size)
    if autotune && capacity < autoTunedCapacityNeeded {
        // Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
    if autotune && !canAutotune {
        autotune = false
    }

    deleteBucket := func() {
        objectAPI.DeleteBucket(context.Background(), pathJoin(minioMetaBucket, "speedtest"), DeleteBucketOptions{
            Force:      true,
            NoRecreate: true,
        })
    bucketExists, err := makeObjectPerfBucket(ctx, objectAPI)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if !bucketExists {
        defer deleteObjectPerfBucket(objectAPI)
    }
    defer deleteBucket()

    // Freeze all incoming S3 API calls before running speedtest.
    globalNotificationSys.ServiceFreeze(ctx, true)

@@ -1072,7 +1144,7 @@ func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.
    defer keepAliveTicker.Stop()

    enc := json.NewEncoder(w)
    ch := speedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
    ch := objectSpeedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
    for {
        select {
        case <-ctx.Done():

@@ -1095,6 +1167,50 @@ func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.
    }
}

func makeObjectPerfBucket(ctx context.Context, objectAPI ObjectLayer) (bucketExists bool, err error) {
    err = objectAPI.MakeBucketWithLocation(ctx, globalObjectPerfBucket, BucketOptions{})
    if err != nil {
        if _, ok := err.(BucketExists); !ok {
            // Only BucketExists error can be ignored.
            return false, err
        }
        bucketExists = true
    }
    return bucketExists, nil
}

func deleteObjectPerfBucket(objectAPI ObjectLayer) {
    objectAPI.DeleteBucket(context.Background(), globalObjectPerfBucket, DeleteBucketOptions{
        Force:      true,
        NoRecreate: true,
    })
}

func validateObjPerfOptions(ctx context.Context, objectAPI ObjectLayer, concurrent int, size int, autotune bool) (sufficientCapacity bool, canAutotune bool, capacityErrMsg string) {
    storageInfo, _ := objectAPI.StorageInfo(ctx)
    capacityNeeded := uint64(concurrent * size)
    capacity := uint64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo))

    if capacity < capacityNeeded {
        return false, false, fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
            humanize.IBytes(capacityNeeded), humanize.IBytes(capacity))
    }

    // Verify if we can employ autotune without running out of capacity,
    // if we do run out of capacity, make sure to turn-off autotuning
    // in such situations.
    if autotune {
        newConcurrent := concurrent + (concurrent+1)/2
        autoTunedCapacityNeeded := uint64(newConcurrent * size)
        if capacity < autoTunedCapacityNeeded {
            // Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
            return true, false, ""
        }
    }

    return true, autotune, ""
}
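
To make the capacity arithmetic concrete: with the defaults the health handler later in this diff passes in (concurrent = 32, size = 64 MiB), the base requirement is 32 x 64 MiB = 2 GiB of usable free space, and the autotune headroom check raises concurrency to 48, i.e. 3 GiB:

package main

import (
    "fmt"

    "github.com/dustin/go-humanize"
)

func main() {
    concurrent := 32
    size := 64 * humanize.MiByte // bytes uploaded per concurrent request

    // Base requirement, as computed in validateObjPerfOptions above.
    fmt.Println(humanize.IBytes(uint64(concurrent * size))) // 2.0 GiB

    // Autotune may raise concurrency by ~50%, so that headroom is checked too.
    newConcurrent := concurrent + (concurrent+1)/2 // 48
    fmt.Println(humanize.IBytes(uint64(newConcurrent * size))) // 3.0 GiB
}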

// NetSpeedtestHandler - reports maximum network throughput
func (a adminAPIHandlers) NetSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "NetSpeedtestHandler")

@@ -1113,7 +1229,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
        return
    }

    if !globalIsErasure {
        if globalIsGateway {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
            return
        }

@@ -1147,43 +1263,29 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
    keepAliveTicker := time.NewTicker(500 * time.Millisecond)
    defer keepAliveTicker.Stop()

    enc := json.NewEncoder(w)
    ch := globalNotificationSys.DriveSpeedTest(ctx, opts)

    var wg sync.WaitGroup
    wg.Add(1)

    // local driveSpeedTest
    go func() {
        defer wg.Done()
        enc.Encode(driveSpeedTest(ctx, opts))
        if wf, ok := w.(http.Flusher); ok {
            wf.Flush()
        }
    }()

    enc := json.NewEncoder(w)
    for {
        select {
        case <-ctx.Done():
            goto endloop
            return
        case <-keepAliveTicker.C:
            // Write a blank entry to prevent client from disconnecting
            if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
                goto endloop
                return
            }
            w.(http.Flusher).Flush()
        case result, ok := <-ch:
            if !ok {
                goto endloop
                return
            }
            if err := enc.Encode(result); err != nil {
                goto endloop
                return
            }
            w.(http.Flusher).Flush()
        }
    }
endloop:
    wg.Wait()
}
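
The rewritten loop drops the goto/endloop flow in favor of plain returns. The underlying pattern — stream JSON results as they arrive while emitting a blank entry on every ticker fire so clients and proxies do not time out — generalizes; a self-contained sketch, with Result standing in for madmin.DriveSpeedTestResult:

package main

import (
    "context"
    "encoding/json"
    "net/http"
    "time"
)

type Result struct{} // placeholder for the real result type

// streamResults encodes results as they arrive and writes an empty keep-alive
// entry every 500ms, flushing after each write.
func streamResults(ctx context.Context, w http.ResponseWriter, ch <-chan Result) {
    enc := json.NewEncoder(w)
    keepAlive := time.NewTicker(500 * time.Millisecond)
    defer keepAlive.Stop()
    flush := func() {
        if f, ok := w.(http.Flusher); ok {
            f.Flush()
        }
    }
    for {
        select {
        case <-ctx.Done():
            return
        case <-keepAlive.C:
            if enc.Encode(Result{}) != nil {
                return
            }
            flush()
        case res, ok := <-ch:
            if !ok {
                return
            }
            if enc.Encode(res) != nil {
                return
            }
            flush()
        }
    }
}

func main() {} // sketch only; wire streamResults into an http.Handler to use it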

// Admin API errors

@@ -1295,9 +1397,15 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {

    peers, _ := newPeerRestClients(globalEndpoints)

    globalTrace.Subscribe(traceCh, ctx.Done(), func(entry interface{}) bool {
    traceFn := func(entry interface{}) bool {
        return mustTrace(entry, traceOpts)
    })
    }

    err = globalTrace.Subscribe(traceCh, ctx.Done(), traceFn)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
        return
    }

    for _, peer := range peers {
        if peer == nil {

@@ -1369,7 +1477,11 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque

    peers, _ := newPeerRestClients(globalEndpoints)

    globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
    err = globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
        return
    }

    for _, peer := range peers {
        if peer == nil {

@@ -1583,6 +1695,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {

    buckets := madmin.Buckets{}
    objects := madmin.Objects{}
    versions := madmin.Versions{}
    usage := madmin.Usage{}

    objectAPI := newObjectLayerFn()

@@ -1594,6 +1707,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
    if err == nil {
        buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
        objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
        versions = madmin.Versions{Count: dataUsageInfo.VersionsTotalCount}
        usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
    } else {
        buckets = madmin.Buckets{Error: err.Error()}

@@ -1642,6 +1756,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
        DeploymentID: globalDeploymentID,
        Buckets:      buckets,
        Objects:      objects,
        Versions:     versions,
        Usage:        usage,
        Services:     services,
        Backend:      backend,

@@ -1649,6 +1764,36 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
    }
}

func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
    ctx, cancel := context.WithCancel(dctx)
    defer cancel()

    ki := madmin.KubernetesInfo{}

    req, err := http.NewRequestWithContext(ctx, http.MethodGet, kubernetesVersionEndpoint, nil)
    if err != nil {
        ki.Error = err.Error()
        return ki
    }

    client := &http.Client{
        Transport: NewGatewayHTTPTransport(),
        Timeout:   10 * time.Second,
    }

    resp, err := client.Do(req)
    if err != nil {
        ki.Error = err.Error()
        return ki
    }
    defer resp.Body.Close() // close the response body to avoid leaking the connection

    decoder := json.NewDecoder(resp.Body)
    if err := decoder.Decode(&ki); err != nil {
        ki.Error = err.Error()
    }
    return ki
}
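
getKubernetesInfo decodes the cluster's /version payload straight into madmin.KubernetesInfo. The JSON shape of that endpoint is standard Kubernetes; the struct below is an illustrative stand-in for whichever subset madmin actually keeps:

package main

import (
    "encoding/json"
    "fmt"
)

type versionInfo struct {
    Major      string `json:"major"`
    Minor      string `json:"minor"`
    GitVersion string `json:"gitVersion"`
    Platform   string `json:"platform"`
}

func main() {
    payload := []byte(`{"major":"1","minor":"24","gitVersion":"v1.24.2","platform":"linux/amd64"}`)
    var v versionInfo
    if err := json.Unmarshal(payload, &v); err != nil {
        panic(err)
    }
    fmt.Printf("Kubernetes %s.%s (%s)\n", v.Major, v.Minor, v.GitVersion)
}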
|
||||
|
||||
// HealthInfoHandler - GET /minio/admin/v3/healthinfo
|
||||
// ----------
|
||||
// Get server health info
|
||||
@@ -1663,13 +1808,17 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
query := r.Form
|
||||
healthInfo := madmin.HealthInfo{Version: madmin.HealthInfoVersion}
|
||||
healthInfo := madmin.HealthInfo{
|
||||
Version: madmin.HealthInfoVersion,
|
||||
Minio: madmin.MinioHealthInfo{
|
||||
Info: madmin.MinioInfo{
|
||||
DeploymentID: globalDeploymentID,
|
||||
},
|
||||
},
|
||||
}
|
||||
healthInfoCh := make(chan madmin.HealthInfo)
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
partialWrite := func(oinfo madmin.HealthInfo) {
|
||||
healthInfoCh <- oinfo
|
||||
}
|
||||
|
||||
setCommonHeaders(w)
|
||||
|
||||
@@ -1685,7 +1834,12 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
logger.LogIf(ctx, enc.Encode(healthInfo))
|
||||
}
|
||||
|
||||
deadline := 1 * time.Hour
|
||||
deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
|
||||
if query.Get(string(madmin.HealthDataTypePerfNet)) != "" ||
|
||||
query.Get(string(madmin.HealthDataTypePerfDrive)) != "" ||
|
||||
query.Get(string(madmin.HealthDataTypePerfObj)) != "" {
|
||||
deadline = 1 * time.Hour
|
||||
}
|
||||
if dstr := r.Form.Get("deadline"); dstr != "" {
|
||||
var err error
|
||||
deadline, err = time.ParseDuration(dstr)
|
||||
@@ -1695,9 +1849,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
deadlinedCtx, deadlineCancel := context.WithTimeout(ctx, deadline)
|
||||
defer deadlineCancel()
|
||||
|
||||
nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
|
||||
lkctx, err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
|
||||
if err != nil { // returns a locked lock
|
||||
@@ -1706,6 +1857,15 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
defer nsLock.Unlock(lkctx.Cancel)
|
||||
|
||||
healthCtx, healthCancel := context.WithTimeout(lkctx.Context(), deadline)
|
||||
defer healthCancel()
|
||||
|
||||
// Freeze all incoming S3 API calls before running speedtest.
|
||||
globalNotificationSys.ServiceFreeze(ctx, true)
|
||||
|
||||
// unfreeze all incoming S3 API calls after speedtest.
|
||||
defer globalNotificationSys.ServiceFreeze(ctx, false)
|
||||
|
||||
hostAnonymizer := createHostAnonymizer()
|
||||
// anonAddr - Anonymizes hosts in given input string.
|
||||
anonAddr := func(addr string) string {
|
||||
@@ -1724,13 +1884,27 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
info.SetAddr(anonAddr(info.GetAddr()))
|
||||
}
|
||||
|
||||
partialWrite := func(oinfo madmin.HealthInfo) {
|
||||
select {
|
||||
case healthInfoCh <- oinfo:
|
||||
case <-healthCtx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
getAndWritePlatformInfo := func() {
|
||||
if IsKubernetes() {
|
||||
healthInfo.Sys.KubernetesInfo = getKubernetesInfo(healthCtx)
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
}
|
||||
|
||||
getAndWriteCPUs := func() {
|
||||
if query.Get("syscpu") == "true" {
|
||||
localCPUInfo := madmin.GetCPUs(deadlinedCtx, globalLocalNodeName)
|
||||
localCPUInfo := madmin.GetCPUs(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localCPUInfo)
|
||||
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, localCPUInfo)
|
||||
|
||||
peerCPUInfo := globalNotificationSys.GetCPUs(deadlinedCtx)
|
||||
peerCPUInfo := globalNotificationSys.GetCPUs(healthCtx)
|
||||
for _, cpuInfo := range peerCPUInfo {
|
||||
anonymizeAddr(&cpuInfo)
|
||||
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, cpuInfo)
|
||||
@@ -1742,11 +1916,11 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
getAndWritePartitions := func() {
|
||||
if query.Get("sysdrivehw") == "true" {
|
||||
localPartitions := madmin.GetPartitions(deadlinedCtx, globalLocalNodeName)
|
||||
localPartitions := madmin.GetPartitions(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localPartitions)
|
||||
healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, localPartitions)
|
||||
|
||||
peerPartitions := globalNotificationSys.GetPartitions(deadlinedCtx)
|
||||
peerPartitions := globalNotificationSys.GetPartitions(healthCtx)
|
||||
for _, p := range peerPartitions {
|
||||
anonymizeAddr(&p)
|
||||
healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, p)
|
||||
@@ -1757,11 +1931,11 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
getAndWriteOSInfo := func() {
|
||||
if query.Get("sysosinfo") == "true" {
|
||||
localOSInfo := madmin.GetOSInfo(deadlinedCtx, globalLocalNodeName)
|
||||
localOSInfo := madmin.GetOSInfo(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localOSInfo)
|
||||
healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, localOSInfo)
|
||||
|
||||
peerOSInfos := globalNotificationSys.GetOSInfo(deadlinedCtx)
|
||||
peerOSInfos := globalNotificationSys.GetOSInfo(healthCtx)
|
||||
for _, o := range peerOSInfos {
|
||||
anonymizeAddr(&o)
|
||||
healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, o)
|
||||
@@ -1772,11 +1946,11 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
getAndWriteMemInfo := func() {
|
||||
if query.Get("sysmem") == "true" {
|
||||
localMemInfo := madmin.GetMemInfo(deadlinedCtx, globalLocalNodeName)
|
||||
localMemInfo := madmin.GetMemInfo(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localMemInfo)
|
||||
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, localMemInfo)
|
||||
|
||||
peerMemInfos := globalNotificationSys.GetMemInfo(deadlinedCtx)
|
||||
peerMemInfos := globalNotificationSys.GetMemInfo(healthCtx)
|
||||
for _, m := range peerMemInfos {
|
||||
anonymizeAddr(&m)
|
||||
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, m)
|
||||
@@ -1787,12 +1961,12 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
getAndWriteSysErrors := func() {
|
||||
if query.Get(string(madmin.HealthDataTypeSysErrors)) == "true" {
|
||||
localSysErrors := madmin.GetSysErrors(deadlinedCtx, globalLocalNodeName)
|
||||
localSysErrors := madmin.GetSysErrors(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localSysErrors)
|
||||
healthInfo.Sys.SysErrs = append(healthInfo.Sys.SysErrs, localSysErrors)
|
||||
partialWrite(healthInfo)
|
||||
|
||||
peerSysErrs := globalNotificationSys.GetSysErrors(deadlinedCtx)
|
||||
peerSysErrs := globalNotificationSys.GetSysErrors(healthCtx)
|
||||
for _, se := range peerSysErrs {
|
||||
anonymizeAddr(&se)
|
||||
healthInfo.Sys.SysErrs = append(healthInfo.Sys.SysErrs, se)
|
||||
@@ -1803,12 +1977,12 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
getAndWriteSysConfig := func() {
|
||||
if query.Get(string(madmin.HealthDataTypeSysConfig)) == "true" {
|
||||
localSysConfig := madmin.GetSysConfig(deadlinedCtx, globalLocalNodeName)
|
||||
localSysConfig := madmin.GetSysConfig(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localSysConfig)
|
||||
healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, localSysConfig)
|
||||
partialWrite(healthInfo)
|
||||
|
||||
peerSysConfig := globalNotificationSys.GetSysConfig(deadlinedCtx)
|
||||
peerSysConfig := globalNotificationSys.GetSysConfig(healthCtx)
|
||||
for _, sc := range peerSysConfig {
|
||||
anonymizeAddr(&sc)
|
||||
healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, sc)
|
||||
@@ -1819,12 +1993,12 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
getAndWriteSysServices := func() {
|
||||
if query.Get(string(madmin.HealthDataTypeSysServices)) == "true" {
|
||||
localSysServices := madmin.GetSysServices(deadlinedCtx, globalLocalNodeName)
|
||||
localSysServices := madmin.GetSysServices(healthCtx, globalLocalNodeName)
|
||||
anonymizeAddr(&localSysServices)
|
||||
healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, localSysServices)
|
||||
partialWrite(healthInfo)
|
||||
|
||||
peerSysServices := globalNotificationSys.GetSysServices(deadlinedCtx)
|
||||
peerSysServices := globalNotificationSys.GetSysServices(healthCtx)
|
||||
for _, ss := range peerSysServices {
|
||||
anonymizeAddr(&ss)
|
||||
healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, ss)
|
||||
@@ -1899,10 +2073,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque

 	getAndWriteProcInfo := func() {
 		if query.Get("sysprocess") == "true" {
-			localProcInfo := madmin.GetProcInfo(deadlinedCtx, globalLocalNodeName)
+			localProcInfo := madmin.GetProcInfo(healthCtx, globalLocalNodeName)
 			anonymizeProcInfo(&localProcInfo)
 			healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, localProcInfo)
-			peerProcInfos := globalNotificationSys.GetProcInfo(deadlinedCtx)
+			peerProcInfos := globalNotificationSys.GetProcInfo(healthCtx)
 			for _, p := range peerProcInfos {
 				anonymizeProcInfo(&p)
 				healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, p)
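The collectors in the hunks above all follow one pattern: fetch the local node's record under the health context, anonymize its address, append it to the running report, then repeat for each peer. A minimal, self-contained sketch of that shape is below; `NodeInfo`, `gather` and the `flush` callback are hypothetical stand-ins for illustration, not MinIO APIs.

```go
package main

import (
	"context"
	"fmt"
	"strings"
	"time"
)

// NodeInfo is a stand-in for the per-node madmin info types.
type NodeInfo struct {
	Addr  string
	Error string
}

// anonymize replaces the node address with a stable placeholder, mirroring
// what anonymizeAddr does for each local and peer record.
func anonymize(info *NodeInfo, placeholder string) {
	if strings.TrimSpace(info.Addr) != "" {
		info.Addr = placeholder
	}
}

// gather collects the local record first, then peer records, appending each
// to the running report so partial results can be flushed as they arrive.
func gather(ctx context.Context, local NodeInfo, peers []NodeInfo, flush func([]NodeInfo)) []NodeInfo {
	report := make([]NodeInfo, 0, len(peers)+1)

	anonymize(&local, "node-local")
	report = append(report, local)
	flush(report) // stream what we have so far

	for i, p := range peers {
		select {
		case <-ctx.Done(): // stop early once the health deadline expires
			return report
		default:
		}
		anonymize(&p, fmt.Sprintf("node-%d", i+1))
		report = append(report, p)
		flush(report)
	}
	return report
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	out := gather(ctx, NodeInfo{Addr: "10.0.0.1:9000"},
		[]NodeInfo{{Addr: "10.0.0.2:9000"}}, func(r []NodeInfo) {
			fmt.Printf("partial: %d records\n", len(r))
		})
	fmt.Println(out)
}
```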
@@ -1928,49 +2102,84 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 	}

 	getAndWriteDrivePerfInfo := func() {
-		if query.Get("perfdrive") == "true" {
-			localDPI := getDrivePerfInfos(deadlinedCtx, globalLocalNodeName)
-			anonymizeAddr(&localDPI)
-			healthInfo.Perf.Drives = append(healthInfo.Perf.Drives, localDPI)
-			partialWrite(healthInfo)
-
-			perfCh := globalNotificationSys.GetDrivePerfInfos(deadlinedCtx)
-			for perfInfo := range perfCh {
-				anonymizeAddr(&perfInfo)
-				healthInfo.Perf.Drives = append(healthInfo.Perf.Drives, perfInfo)
-				partialWrite(healthInfo)
+		if query.Get(string(madmin.HealthDataTypePerfDrive)) == "true" {
+			opts := madmin.DriveSpeedTestOpts{
+				Serial:    false,
+				BlockSize: 4 * humanize.MiByte,
+				FileSize:  1 * humanize.GiByte,
+			}
+
+			localDPI := driveSpeedTest(ctx, opts)
+			healthInfo.Perf.DrivePerf = append(healthInfo.Perf.DrivePerf, localDPI)
+
+			perfCh := globalNotificationSys.DriveSpeedTest(ctx, opts)
+			for perfInfo := range perfCh {
+				healthInfo.Perf.DrivePerf = append(healthInfo.Perf.DrivePerf, perfInfo)
 			}
+			partialWrite(healthInfo)
 		}
 	}

-	anonymizeNetPerfInfo := func(npi *madmin.NetPerfInfo) {
-		anonymizeAddr(npi)
-		rps := npi.RemotePeers
-		for idx, peer := range rps {
-			anonymizeAddr(&peer)
-			rps[idx] = peer
+	getAndWriteObjPerfInfo := func() {
+		if query.Get(string(madmin.HealthDataTypePerfObj)) == "true" {
+			concurrent := 32
+			if runtime.GOMAXPROCS(0) < concurrent {
+				concurrent = runtime.GOMAXPROCS(0)
+			}
+
+			size := 64 * humanize.MiByte
+			autotune := true
+
+			sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)
+
+			if !sufficientCapacity {
+				healthInfo.Perf.Error = capacityErrMsg
+				partialWrite(healthInfo)
+				return
+			}
+
+			if !canAutotune {
+				autotune = false
+			}
+
+			bucketExists, err := makeObjectPerfBucket(ctx, objectAPI)
+			if err != nil {
+				healthInfo.Perf.Error = "Could not make object perf bucket: " + err.Error()
+				partialWrite(healthInfo)
+				return
+			}
+
+			if !bucketExists {
+				defer deleteObjectPerfBucket(objectAPI)
+			}
+
+			opts := speedTestOpts{
+				throughputSize:   size,
+				concurrencyStart: concurrent,
+				duration:         10 * time.Second,
+				autotune:         autotune,
+			}
+
+			perfCh := objectSpeedTest(ctx, opts)
+			for perfInfo := range perfCh {
+				healthInfo.Perf.ObjPerf = append(healthInfo.Perf.ObjPerf, perfInfo)
+			}
+			partialWrite(healthInfo)
 		}
-		npi.RemotePeers = rps
 	}

 	getAndWriteNetPerfInfo := func() {
-		if globalIsDistErasure && query.Get("perfnet") == "true" {
-			localNPI := globalNotificationSys.GetNetPerfInfo(deadlinedCtx)
-			anonymizeNetPerfInfo(&localNPI)
-			healthInfo.Perf.Net = append(healthInfo.Perf.Net, localNPI)
-
-			partialWrite(healthInfo)
-
-			netInfos := globalNotificationSys.DispatchNetPerfChan(deadlinedCtx)
-			for netInfo := range netInfos {
-				anonymizeNetPerfInfo(&netInfo)
-				healthInfo.Perf.Net = append(healthInfo.Perf.Net, netInfo)
-				partialWrite(healthInfo)
+		if query.Get(string(madmin.HealthDataTypePerfObj)) == "true" {
+			if !globalIsDistErasure {
+				return
+			}
+
+			netPerf := globalNotificationSys.Netperf(ctx, time.Second*10)
+			for _, np := range netPerf {
+				np.Endpoint = anonAddr(np.Endpoint)
+				healthInfo.Perf.NetPerf = append(healthInfo.Perf.NetPerf, np)
 			}
-
-			ppi := globalNotificationSys.GetParallelNetPerfInfo(deadlinedCtx)
-			anonymizeNetPerfInfo(&ppi)
-			healthInfo.Perf.NetParallel = ppi
+
 			partialWrite(healthInfo)
 		}
 	}
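The rewritten drive collector stops sampling raw drive counters and instead runs a real speed test with a fixed block size, a fixed file size, and concurrency capped by available CPUs. A short sketch of how such parameters might be derived follows; `speedTestParams` and `miByte` are illustrative stand-ins, where the diff itself uses `madmin.DriveSpeedTestOpts` and the humanize constants.

```go
package main

import (
	"fmt"
	"runtime"
)

const miByte = 1 << 20

// speedTestParams mirrors the shape of the options the handler builds:
// worker count capped at GOMAXPROCS, fixed block size and file size.
type speedTestParams struct {
	Concurrent int
	BlockSize  int64
	FileSize   int64
}

func defaultParams() speedTestParams {
	concurrent := 32
	if n := runtime.GOMAXPROCS(0); n < concurrent {
		concurrent = n // never run more workers than available CPUs
	}
	return speedTestParams{
		Concurrent: concurrent,
		BlockSize:  4 * miByte,
		FileSize:   1024 * miByte,
	}
}

func main() {
	fmt.Printf("%+v\n", defaultParams())
}
```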
@@ -1996,7 +2205,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 	go func() {
 		defer close(healthInfoCh)

-		partialWrite(healthInfo) // Write first message with only version populated
+		partialWrite(healthInfo) // Write first message with only version and deployment id populated
 		getAndWritePlatformInfo()
 		getAndWriteCPUs()
 		getAndWritePartitions()
 		getAndWriteOSInfo()
@@ -2004,6 +2214,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 		getAndWriteProcInfo()
 		getAndWriteMinioConfig()
 		getAndWriteDrivePerfInfo()
+		getAndWriteObjPerfInfo()
 		getAndWriteNetPerfInfo()
 		getAndWriteSysErrors()
 		getAndWriteSysServices()
@@ -2077,7 +2288,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 				return
 			}
 			w.(http.Flusher).Flush()
-		case <-deadlinedCtx.Done():
+		case <-healthCtx.Done():
 			return
 		}
 	}
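The select loop above is the streaming half of the handler: each partial health message is flushed to the client as soon as it arrives, and the whole stream stops once the health context's deadline fires. A minimal runnable sketch of that loop, using a test recorder in place of a live connection:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// streamJSON writes each message as it arrives and flushes immediately, but
// gives up once the request-scoped deadline fires, mirroring the
// healthCtx.Done() branch in the handler's select loop.
func streamJSON(ctx context.Context, w http.ResponseWriter, msgs <-chan []byte) {
	flusher, canFlush := w.(http.Flusher)
	for {
		select {
		case msg, more := <-msgs:
			if !more {
				return
			}
			if _, err := w.Write(msg); err != nil {
				return
			}
			if canFlush {
				flusher.Flush() // push partial output to the client now
			}
		case <-ctx.Done():
			return // deadline reached; stop writing
		}
	}
}

func main() {
	msgs := make(chan []byte, 2)
	msgs <- []byte(`{"v":1}`)
	msgs <- []byte(`{"v":2}`)
	close(msgs)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	rec := httptest.NewRecorder() // implements http.Flusher
	streamJSON(ctx, rec, msgs)
	fmt.Println(rec.Body.String())
}
```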
@@ -2332,19 +2543,9 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(GlobalContext, timeout)
 	defer cancel()

-	client := &http.Client{Transport: &http.Transport{
-		Proxy:                 http.ProxyFromEnvironment,
-		DialContext:           xhttp.NewCustomDialContext(timeout),
-		ResponseHeaderTimeout: 5 * time.Second,
-		TLSHandshakeTimeout:   5 * time.Second,
-		ExpectContinueTimeout: 5 * time.Second,
-		TLSClientConfig:       &tls.Config{RootCAs: globalRootCAs},
-		// Go net/http automatically unzip if content-type is
-		// gzip disable this feature, as we are always interested
-		// in raw stream.
-		DisableCompression: true,
-	}}
-	defer client.CloseIdleConnections()
+	client := &http.Client{
+		Transport: globalProxyTransport,
+	}

 	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
 	if err != nil {
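The change above replaces a per-call `http.Transport` (a fresh connection pool torn down on every probe) with a process-wide one. A minimal sketch of the same idea, assuming nothing about MinIO's `globalProxyTransport` beyond it being a shared transport:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// One process-wide transport: connection pooling, timeouts and compression
// behavior are configured once instead of per request.
var sharedTransport = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout:   5 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	ResponseHeaderTimeout: 5 * time.Second,
	TLSHandshakeTimeout:   5 * time.Second,
	ExpectContinueTimeout: 5 * time.Second,
	// We always want the raw stream, so disable transparent gzip.
	DisableCompression: true,
}

// headOK issues a HEAD request with a per-call timeout while reusing the
// shared transport, mirroring the slimmed-down checkConnection.
func headOK(endpoint string, timeout time.Duration) error {
	client := &http.Client{Transport: sharedTransport, Timeout: timeout}
	resp, err := client.Head(endpoint)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}

func main() {
	fmt.Println(headOK("https://example.com", 3*time.Second))
}
```

Reusing one transport keeps idle TCP and TLS sessions warm across health probes, which is why the `CloseIdleConnections` call could be dropped.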
@@ -2437,8 +2638,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
 	// of profiling data of all nodes
 	zipWriter := zip.NewWriter(encw)
 	defer zipWriter.Close()

-	err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, si StatInfo) error {
+	rawDataFn := func(r io.Reader, host, disk, filename string, si StatInfo) error {
 		// Prefix host+disk
 		filename = path.Join(host, disk, filename)
 		if si.Dir {
@@ -2449,6 +2649,10 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
 			// Not, set it to default.
 			si.Mode = 0o600
 		}
+		if si.ModTime.IsZero() {
+			// Set time to now.
+			si.ModTime = time.Now()
+		}
 		header, zerr := zip.FileInfoHeader(dummyFileInfo{
 			name: filename,
 			size: si.Size,
@@ -2471,10 +2675,33 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
 			logger.LogIf(ctx, err)
 		}
 		return nil
-	})
+	}
+	err = o.GetRawData(ctx, volume, file, rawDataFn)
+	if !errors.Is(err, errFileNotFound) {
 		logger.LogIf(ctx, err)
+	}

+	// save the format.json as part of inspect by default
+	if volume != minioMetaBucket && file != formatConfigFile {
+		err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
+	}
+	if !errors.Is(err, errFileNotFound) {
+		logger.LogIf(ctx, err)
+	}
+	// save args passed to inspect command
+	inspectArgs := []string{fmt.Sprintf(" Inspect path: %s%s%s\n", volume, slashSeparator, file)}
+	cmdLine := []string{"Server command line args: "}
+	for _, pool := range globalEndpoints {
+		cmdLine = append(cmdLine, pool.CmdLine)
+	}
+	cmdLine = append(cmdLine, "\n")
+	inspectArgs = append(inspectArgs, cmdLine...)
+	inspectArgsBytes := []byte(strings.Join(inspectArgs, " "))
+	if err = rawDataFn(bytes.NewReader(inspectArgsBytes), "", "", "inspect-input.txt", StatInfo{
+		Size: int64(len(inspectArgsBytes)),
+	}); err != nil {
+		logger.LogIf(ctx, err)
+	}
 }
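Factoring the inline closure out into `rawDataFn` is what lets the handler reuse the same zip-writing path for three sources: the requested raw data, `format.json`, and a synthetic `inspect-input.txt` built in memory. A minimal sketch of that reuse with the standard library; `addEntry` is an illustrative helper, not the MinIO function.

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"os"
	"strings"
	"time"
)

// addEntry streams one named blob into the archive, defaulting the mode and
// modification time the way the handler does for raw backend files.
func addEntry(zw *zip.Writer, name string, mod time.Time, r io.Reader) error {
	if mod.IsZero() {
		mod = time.Now()
	}
	hdr := &zip.FileHeader{Name: name, Method: zip.Deflate, Modified: mod}
	hdr.SetMode(0o600)
	w, err := zw.CreateHeader(hdr)
	if err != nil {
		return err
	}
	_, err = io.Copy(w, r)
	return err
}

func main() {
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)

	// The same writer path serves real backend data and a synthetic
	// "inspect-input.txt"-style entry recording the request arguments.
	args := "Inspect path: bucket/object\nServer command line args: ...\n"
	if err := addEntry(zw, "inspect-input.txt", time.Time{}, strings.NewReader(args)); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	zw.Close()
	fmt.Printf("archive is %d bytes\n", buf.Len())
}
```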
func createHostAnonymizerForFSMode() map[string]string {
@@ -2520,6 +2747,7 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
 	if !found {
 		// In distributed setup, anonymized addr = 'poolNum.serverNum'
 		newHost := fmt.Sprintf("pool%d.server%d", poolNum, srvrNum)
+		schemePfx := endpoint.Scheme + "://"

 		// Hostname
 		mapIfNotPresent(hostAnonymizer, endpoint.Hostname(), newHost)
@@ -2529,6 +2757,7 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
 		// Host + port
 		newHostPort = newHost + ":" + endpoint.Port()
 		mapIfNotPresent(hostAnonymizer, endpoint.Host, newHostPort)
+		mapIfNotPresent(hostAnonymizer, schemePfx+endpoint.Host, newHostPort)
 	}

 	newHostPortPath := newHostPort
@@ -2537,10 +2766,11 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
 		currentHostPortPath := endpoint.Host + endpoint.Path
 		newHostPortPath = newHostPort + endpoint.Path
 		mapIfNotPresent(hostAnonymizer, currentHostPortPath, newHostPortPath)
+		mapIfNotPresent(hostAnonymizer, schemePfx+currentHostPortPath, newHostPortPath)
 	}

 	// Full url
-	hostAnonymizer[currentURL] = endpoint.Scheme + "://" + newHostPortPath
+	hostAnonymizer[currentURL] = schemePfx + newHostPortPath
 }
 }

@@ -194,7 +194,6 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
 	for {
 		select {
 		case <-periodicTimer.C:
-			periodicTimer.Reset(time.Minute * 5)
 			now := UTCNow()
 			ahs.Lock()
 			for path, h := range ahs.healSeqMap {
@@ -203,6 +202,8 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
 				}
 			}
 			ahs.Unlock()
+
+			periodicTimer.Reset(time.Minute * 5)
 		case <-ctx.Done():
 			// server could be restarting - need
 			// to exit immediately
@@ -581,12 +582,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
 	// heal-results in memory and the client has not consumed it
 	// for too long.
 	unconsumedTimer := time.NewTimer(healUnconsumedTimeout)
-	defer func() {
-		// stop the timeout timer so it is garbage collected.
-		if !unconsumedTimer.Stop() {
-			<-unconsumedTimer.C
-		}
-	}()
+	defer unconsumedTimer.Stop()

 	var itemsLen int
 	for {
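Two heal-ops cleanups above touch `time.Timer` handling: the cleanup loop now resets its timer after the work completes rather than before, and the stop-and-drain closure collapses to a plain deferred `Stop`. The contrast between the two idioms in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Stop-and-drain idiom: needed only when the same timer is about to be
	// reused with Reset and a stale tick could still be sitting in t.C.
	t := time.NewTimer(time.Hour)
	if !t.Stop() {
		<-t.C // drain the channel so a later Reset starts clean
	}

	// The diff's simplification: when the timer is only stopped on the way
	// out of the function and never reused, a bare Stop is enough; any
	// pending value is garbage collected along with the channel.
	t2 := time.NewTimer(time.Hour)
	defer t2.Stop()

	fmt.Println("timers stopped")
}
```

Resetting after the body runs (as the cleanup loop now does) also means slow iterations no longer overlap with the next tick.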
@@ -701,7 +697,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 	if source.opts != nil {
 		task.opts = *source.opts
 	} else {
-		task.opts.ScanMode = globalHealConfig.ScanMode()
+		task.opts.ScanMode = madmin.HealNormalScan
 	}

 	h.mutex.Lock()

@@ -84,10 +84,12 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(gz(httpTraceAll(adminAPI.CancelDecommission))).Queries("pool", "{pool:.*}")
 	}

-	// Profiling operations
+	// Profiling operations - deprecated API
 	adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(gz(httpTraceAll(adminAPI.StartProfilingHandler))).
 		Queries("profilerType", "{profilerType:.*}")
 	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(gz(httpTraceAll(adminAPI.DownloadProfilingHandler)))
+	// Profiling operations
+	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(gz(httpTraceAll(adminAPI.ProfileHandler)))

 	// Config KV operations.
 	if enableConfigOps {
@@ -168,50 +170,56 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 	// Set Group Status
 	adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetGroupStatus))).Queries("group", "{group:.*}").Queries("status", "{status:.*}")

 	if globalIsDistErasure || globalIsErasure {
-		// GetBucketQuotaConfig
-		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
-			gz(httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
-		// PutBucketQuotaConfig
-		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
-			gz(httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
+		// GetBucketQuotaConfig
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
+		// PutBucketQuotaConfig
+		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")

-		// Bucket replication operations
-		// GetBucketTargetHandler
-		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
-			gz(httpTraceHdrs(adminAPI.ListRemoteTargetsHandler))).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
-		// SetRemoteTargetHandler
-		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
-			gz(httpTraceHdrs(adminAPI.SetRemoteTargetHandler))).Queries("bucket", "{bucket:.*}")
-		// RemoveRemoteTargetHandler
-		adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
-			gz(httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler))).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
+		// Bucket replication operations
+		// GetBucketTargetHandler
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.ListRemoteTargetsHandler))).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
+		// SetRemoteTargetHandler
+		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.SetRemoteTargetHandler))).Queries("bucket", "{bucket:.*}")
+		// RemoveRemoteTargetHandler
+		adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler))).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")

-		// Remote Tier management operations
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
-		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
-		adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
-		// Tier stats
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))
+		// Bucket migration operations
+		// ExportBucketMetaHandler
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.ExportBucketMetadataHandler)))
+		// ImportBucketMetaHandler
+		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-bucket-metadata").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.ImportBucketMetadataHandler)))

-		// Cluster Replication APIs
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))
+		// Remote Tier management operations
+		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
+		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
+		adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
+		// Tier stats
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))

-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
-		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
-		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
 	}

+	// Cluster Replication APIs
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
+	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
+	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
+	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))
+
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
+	adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
+	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
+	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))

 	if globalIsDistErasure {
 		// Top locks

@@ -31,7 +31,10 @@ import (
 // local endpoints from given list of endpoints
 func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
 	var localEndpoints Endpoints
-	addr := r.Host
+	addr := globalLocalNodeName
+	if r != nil {
+		addr = r.Host
+	}
 	if globalIsDistErasure {
 		addr = globalLocalNodeName
 	}
@@ -40,7 +43,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 		for _, endpoint := range ep.Endpoints {
 			nodeName := endpoint.Host
 			if nodeName == "" {
-				nodeName = r.Host
+				nodeName = addr
 			}
 			if endpoint.IsLocal {
 				// Only proceed for local endpoints
@@ -50,7 +53,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 			}
 			_, present := network[nodeName]
 			if !present {
-				if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
+				if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
 					network[nodeName] = string(madmin.ItemOnline)
 				} else {
 					network[nodeName] = string(madmin.ItemOffline)

@@ -131,7 +131,7 @@ const (
 	ErrReplicationNeedsVersioningError
 	ErrReplicationBucketNeedsVersioningError
 	ErrReplicationDenyEditError
-	ErrReplicationNoMatchingRuleError
+	ErrReplicationNoExistingObjects
 	ErrObjectRestoreAlreadyInProgress
 	ErrNoSuchKey
 	ErrNoSuchUpload
@@ -280,6 +280,7 @@ const (
 	ErrSiteReplicationBucketConfigError
 	ErrSiteReplicationBucketMetaError
 	ErrSiteReplicationIAMError
+	ErrSiteReplicationConfigMissing

 	// Bucket Quota error codes
 	ErrAdminBucketQuotaExceeded
@@ -383,6 +384,7 @@ const (
 	ErrAdminProfilerNotEnabled
 	ErrInvalidDecompressedSize
 	ErrAddUserInvalidArgument
+	ErrAdminResourceInvalidArgument
 	ErrAdminAccountNotEligible
 	ErrAccountNotEligible
 	ErrAdminServiceAccountNotFound
@@ -891,15 +893,15 @@ var errorCodes = errorCodeMap{
 		Description:    "Bandwidth limit for remote target must be atleast 100MBps",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrReplicationNoMatchingRuleError: {
-		Code:           "XMinioReplicationNoMatchingRule",
-		Description:    "No matching replication rule found for this object prefix",
+	ErrReplicationNoExistingObjects: {
+		Code:           "XMinioReplicationNoExistingObjects",
+		Description:    "No matching ExistingsObjects rule enabled",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrReplicationDenyEditError: {
 		Code:           "XMinioReplicationDenyEdit",
 		Description:    "Cannot alter local replication config since this server is in a cluster replication setup",
-		HTTPStatusCode: http.StatusConflict,
+		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrBucketRemoteIdenticalToSource: {
 		Code: "XMinioAdminRemoteIdenticalToSource",
@@ -1165,7 +1167,7 @@ var errorCodes = errorCodeMap{
 	ErrObjectExistsAsDirectory: {
 		Code:           "XMinioObjectExistsAsDirectory",
 		Description:    "Object name already exists as a directory.",
-		HTTPStatusCode: http.StatusConflict,
+		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrInvalidObjectName: {
 		Code: "XMinioInvalidObjectName",
@@ -1339,7 +1341,11 @@ var errorCodes = errorCodeMap{
 		Description:    "Error while replicating an IAM item",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
+	ErrSiteReplicationConfigMissing: {
+		Code:           "XMinioSiteReplicationConfigMissingError",
+		Description:    "Site not found in site replication configuration",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrMaximumExpires: {
 		Code:        "AuthorizationQueryParametersError",
 		Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
@@ -1825,6 +1831,11 @@ var errorCodes = errorCodeMap{
 		Description:    "User is not allowed to be same as admin access key",
 		HTTPStatusCode: http.StatusForbidden,
 	},
+	ErrAdminResourceInvalidArgument: {
+		Code:           "XMinioInvalidResource",
+		Description:    "Policy, user or group names are not allowed to begin or end with space characters",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrAdminAccountNotEligible: {
 		Code:           "XMinioInvalidIAMCredentials",
 		Description:    "The administrator key is not eligible for this operation",
@@ -2220,7 +2231,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 		}
 	case crypto.Error:
 		apiErr = APIError{
-			Code:           "XMinIOEncryptionError",
+			Code:           "XMinioEncryptionError",
 			Description:    e.Error(),
 			HTTPStatusCode: http.StatusBadRequest,
 		}

@@ -728,6 +728,14 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
 }

 func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
+	if statusCode == 0 {
+		statusCode = 200
+	}
+	// Similar check to http.checkWriteHeaderCode
+	if statusCode < 100 || statusCode > 999 {
+		logger.Error(fmt.Sprintf("invalid WriteHeader code %v", statusCode))
+		statusCode = http.StatusInternalServerError
+	}
 	setCommonHeaders(w)
 	if mType != mimeNone {
 		w.Header().Set(xhttp.ContentType, string(mType))
@@ -791,6 +799,12 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
 		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
 	}

+	// Similar check to http.checkWriteHeaderCode
+	if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
+		logger.Error(fmt.Sprintf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
+		err.HTTPStatusCode = http.StatusInternalServerError
+	}
+
 	// Generate error response.
 	errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
 		w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
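Both response writers now validate the status code before it reaches `WriteHeader`, which panics on codes outside 100-999 (net/http's internal `checkWriteHeaderCode`). A minimal runnable sketch of the same clamp; `safeStatus` is an illustrative helper, not the MinIO function.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

// safeStatus clamps obviously invalid codes before WriteHeader, which would
// otherwise panic inside net/http for out-of-range values.
func safeStatus(code int) int {
	if code == 0 {
		return http.StatusOK // zero means "never set"; default to 200
	}
	if code < 100 || code > 999 {
		log.Printf("invalid WriteHeader code %v", code)
		return http.StatusInternalServerError
	}
	return code
}

func main() {
	rec := httptest.NewRecorder()
	rec.WriteHeader(safeStatus(0))
	fmt.Println(rec.Code)         // 200
	fmt.Println(safeStatus(1234)) // 500, with a log line
}
```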
@@ -342,7 +342,7 @@ func registerAPIRouter(router *mux.Router) {
 			collectAPIStats("getbucketnotification", maxClients(gz(httpTraceAll(api.GetBucketNotificationHandler))))).Queries("notification", "")
 		// ListenNotification
 		router.Methods(http.MethodGet).HandlerFunc(
-			collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
+			collectAPIStats("listennotification", gz(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
 		// ResetBucketReplicationStatus - MinIO extension API
 		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("resetbucketreplicationstatus", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStatusHandler))))).Queries("replication-reset-status", "")
@@ -474,7 +474,7 @@ func registerAPIRouter(router *mux.Router) {

 	// ListenNotification
 	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
-		collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
+		collectAPIStats("listennotification", gz(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")

 	// ListBuckets
 	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
File diff suppressed because one or more lines are too long
@@ -197,13 +197,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
 	return claims
 }

-// Fetch claims in the security token returned by the client.
-func getClaimsFromToken(token string) (map[string]interface{}, error) {
-	if token == "" {
-		claims := xjwt.NewMapClaims()
-		return claims.Map(), nil
-	}
-
+func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
 	// JWT token for x-amz-security-token is signed with admin
 	// secret key, temporary credentials become invalid if
 	// server admin credentials change. This is done to ensure
@@ -212,13 +206,19 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
 	// hijacking the policies. We need to make sure that this is
 	// based an admin credential such that token cannot be decoded
 	// on the client side and is treated like an opaque value.
-	claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
+	claims, err := auth.ExtractClaims(token, secret)
 	if err != nil {
-		return nil, errAuthentication
+		if subtle.ConstantTimeCompare([]byte(secret), []byte(globalActiveCred.SecretKey)) == 1 {
+			return nil, errAuthentication
+		}
+		claims, err = auth.ExtractClaims(token, globalActiveCred.SecretKey)
+		if err != nil {
+			return nil, errAuthentication
+		}
 	}

-	// If OPA is set, return without any further checks.
-	if globalPolicyOPA != nil {
+	// If AuthZPlugin is set, return without any further checks.
+	if newGlobalAuthZPluginFn() != nil {
 		return claims.Map(), nil
 	}
@@ -235,29 +235,56 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
 			logger.LogIf(GlobalContext, err, logger.Application)
 			return nil, errAuthentication
 		}
-		claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
+		claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
 	}

 	return claims.Map(), nil
 }

+// Fetch claims in the security token returned by the client.
+func getClaimsFromToken(token string) (map[string]interface{}, error) {
+	return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
+}
+
 // Fetch claims in the security token returned by the client and validate the token.
 func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
 	token := getSessionToken(r)
 	if token != "" && cred.AccessKey == "" {
 		// x-amz-security-token is not allowed for anonymous access.
 		return nil, ErrNoAccessKey
 	}
-	if cred.IsServiceAccount() && token == "" {
-		token = cred.SessionToken
-	}
-	if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
+
+	if token == "" && cred.IsTemp() {
+		// Temporary credentials should always have x-amz-security-token
 		return nil, ErrInvalidToken
 	}
-	claims, err := getClaimsFromToken(token)
-	if err != nil {
-		return nil, toAPIErrorCode(r.Context(), err)
+
+	if token != "" && !cred.IsTemp() {
+		// x-amz-security-token should not present for static credentials.
+		return nil, ErrInvalidToken
 	}
-	return claims, ErrNone
+
+	if cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
+		// validate token for temporary credentials only.
+		return nil, ErrInvalidToken
+	}
+
+	secret := globalActiveCred.SecretKey
+	if cred.IsServiceAccount() {
+		token = cred.SessionToken
+		secret = cred.SecretKey
+	}
+
+	if token != "" {
+		claims, err := getClaimsFromTokenWithSecret(token, secret)
+		if err != nil {
+			return nil, toAPIErrorCode(r.Context(), err)
+		}
+		return claims, ErrNone
+	}
+
+	claims := xjwt.NewMapClaims()
+	return claims.Map(), ErrNone
 }

 // Check request auth type verifies the incoming http request
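Both the secret comparison in `getClaimsFromTokenWithSecret` and the session-token check in `checkClaimsFromToken` go through `crypto/subtle`, so the comparison takes the same time whether the first byte or the last byte differs. A minimal standalone illustration of the call:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensEqual compares a presented session token against the stored one in
// constant time, so the comparison's duration leaks nothing about how many
// leading bytes matched. Note: inputs of different lengths return 0
// immediately, so only same-length content comparison is constant-time.
func tokensEqual(presented, stored string) bool {
	return subtle.ConstantTimeCompare([]byte(presented), []byte(stored)) == 1
}

func main() {
	fmt.Println(tokensEqual("abc", "abc")) // true
	fmt.Println(tokensEqual("abc", "abd")) // false
}
```

A plain `==` on strings can short-circuit at the first mismatching byte, which is exactly the timing side channel this idiom avoids for credential material.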
@@ -32,6 +32,12 @@ import (
 	iampolicy "github.com/minio/pkg/iam/policy"
 )

+type nullReader struct{}
+
+func (r *nullReader) Read(b []byte) (int, error) {
+	return len(b), nil
+}
+
 // Test get request auth type.
 func TestGetRequestAuthType(t *testing.T) {
 	type testCase struct {
@@ -341,7 +347,8 @@ func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength in
 }

 func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
-	body io.ReadSeeker, t *testing.T) *http.Request {
+	body io.ReadSeeker, t *testing.T,
+) *http.Request {
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
 	cred := globalActiveCred
@@ -277,14 +277,6 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
 		}
 	}

-	if err := bgSeq.healDiskMeta(objAPI); err != nil {
-		if newObjectLayerFn() != nil {
-			// log only in situations, when object layer
-			// has fully initialized.
-			logger.LogIf(bgSeq.ctx, err)
-		}
-	}
-
 	go monitorLocalDisksAndHeal(ctx, z, bgSeq)
 }
@@ -320,9 +312,6 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 		case <-ctx.Done():
 			return
 		case <-diskCheckTimer.C:
-			// Reset to next interval.
-			diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
-
 			var erasureSetInPoolDisksToHeal []map[int][]StorageAPI

 			healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
@@ -456,6 +445,9 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 				}
 			}
 			wg.Wait()
+
+			// Reset for next interval.
+			diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
 		}
 	}
 }

@@ -19,7 +19,6 @@ package cmd

 import (
 	"bytes"
-	"crypto/sha256"
 	"encoding/hex"
 	"errors"
 	"fmt"
@@ -27,6 +26,7 @@ import (
 	"io"

 	"github.com/minio/highwayhash"
+	"github.com/minio/minio/internal/hash/sha256"
 	"golang.org/x/crypto/blake2b"

 	xioutil "github.com/minio/minio/internal/ioutil"

@@ -158,7 +158,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
 		return
 	}

-	config, err := globalBucketMetadataSys.GetSSEConfig(bucket)
+	config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
@@ -206,6 +206,15 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
+
+	// Call site replication hook.
+	if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+		Type:      madmin.SRBucketMetaTypeSSEConfig,
+		Bucket:    bucket,
+		SSEConfig: nil,
+	}); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}
 	writeSuccessNoContent(w)
 }

@@ -43,7 +43,8 @@ func (sys *BucketSSEConfigSys) Get(bucket string) (*sse.BucketSSEConfig, error)
 		return nil, BucketSSEConfigNotFound{Bucket: bucket}
 	}

-	return globalBucketMetadataSys.GetSSEConfig(bucket)
+	sseCfg, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
+	return sseCfg, err
 }

 // validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.

@@ -21,7 +21,6 @@ import (
 	"bytes"
 	"context"
-	"encoding/base64"
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
 	"io"
@@ -33,7 +32,6 @@ import (
 	"strconv"
 	"strings"
-	"sync"
 	"time"

 	"github.com/google/uuid"
 	"github.com/gorilla/mux"
@@ -364,6 +362,18 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
 		}) {
 			bucketsInfo[n] = bucketInfo
 			n++
+		} else if globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     cred.AccessKey,
+			Groups:          cred.Groups,
+			Action:          iampolicy.GetBucketLocationAction,
+			BucketName:      bucketInfo.Name,
+			ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
+			IsOwner:         owner,
+			ObjectName:      "",
+			Claims:          cred.Claims,
+		}) {
+			bucketsInfo[n] = bucketInfo
+			n++
 		}
 	}
 	bucketsInfo = bucketsInfo[:n]
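The added `else if` gives ListBuckets a fallback: an account denied the list-all action can still see an individual bucket if it may at least read that bucket's location. A minimal sketch of that two-step filter; `Action`, `isAllowed` and `visibleBuckets` are hypothetical stand-ins, where the real handler calls `globalIAMSys.IsAllowed` with a full argument struct.

```go
package main

import "fmt"

// Action is a stand-in for iampolicy action names.
type Action string

const (
	listAllMyBucketsAction  Action = "s3:ListAllMyBuckets"
	getBucketLocationAction Action = "s3:GetBucketLocation"
)

// isAllowed is a toy policy evaluator over a per-action allow table.
func isAllowed(allowed map[Action]map[string]bool, a Action, bucket string) bool {
	return allowed[a][bucket] || allowed[a]["*"]
}

// visibleBuckets keeps a bucket if the account may list all buckets, or,
// failing that, if it may at least read that bucket's location.
func visibleBuckets(allowed map[Action]map[string]bool, buckets []string) []string {
	out := make([]string, 0, len(buckets))
	for _, b := range buckets {
		if isAllowed(allowed, listAllMyBucketsAction, b) ||
			isAllowed(allowed, getBucketLocationAction, b) {
			out = append(out, b)
		}
	}
	return out
}

func main() {
	allowed := map[Action]map[string]bool{
		getBucketLocationAction: {"reports": true},
	}
	fmt.Println(visibleBuckets(allowed, []string{"reports", "private"})) // [reports]
}
```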
@@ -471,9 +481,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		hasLockEnabled = true
 	}

-	versioned := globalBucketVersioningSys.Enabled(bucket)
-	suspended := globalBucketVersioningSys.Suspended(bucket)
-
 	type deleteResult struct {
 		delInfo DeletedObject
 		errInfo DeleteError
@@ -481,8 +488,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

 	deleteResults := make([]deleteResult, len(deleteObjectsReq.Objects))

+	vc, _ := globalBucketVersioningSys.Get(bucket)
 	oss := make([]*objSweeper, len(deleteObjectsReq.Objects))

 	for index, object := range deleteObjectsReq.Objects {
 		if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
 			if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
@@ -514,11 +521,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

 		opts := ObjectOptions{
 			VersionID:        object.VersionID,
-			Versioned:        versioned,
-			VersionSuspended: suspended,
+			Versioned:        vc.PrefixEnabled(object.ObjectName),
+			VersionSuspended: vc.Suspended(),
 		}

-		if replicateDeletes || object.VersionID != "" && hasLockEnabled || !globalTierConfigMgr.Empty() {
+		if replicateDeletes || hasLockEnabled || !globalTierConfigMgr.Empty() {
 			if !globalTierConfigMgr.Empty() && object.VersionID == "" && opts.VersionSuspended {
 				opts.VersionID = nullVersionID
 			}
@@ -526,7 +533,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			}

 			if !globalTierConfigMgr.Empty() {
-				oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(versioned, suspended)
+				oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(opts.Versioned, opts.VersionSuspended)
 				oss[index].SetTransitionState(goi.TransitionedObject)
 			}

||||
@@ -547,7 +554,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
object.ReplicateDecisionStr = dsc.String()
|
||||
}
|
||||
}
|
||||
if object.VersionID != "" && hasLockEnabled {
|
||||
if hasLockEnabled {
|
||||
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
|
||||
apiErr := errorCodes.ToAPIErr(apiErrCode)
|
||||
deleteResults[index].errInfo = DeleteError{
|
||||
@@ -581,8 +588,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

 	deleteList := toNames(objectsToDelete)
 	dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
-		Versioned:        versioned,
-		VersionSuspended: suspended,
+		PrefixEnabledFn:  vc.PrefixEnabled,
+		VersionSuspended: vc.Suspended(),
 	})

 	for i := range errs {
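Across these hunks the handler stops treating versioning as a single bucket-wide boolean and instead consults a fetched config whose `PrefixEnabled` answers per object name. A small sketch of what such a config could look like; `versioningConfig` and its fields are hypothetical stand-ins for the type `globalBucketVersioningSys.Get` returns.

```go
package main

import (
	"fmt"
	"strings"
)

// versioningConfig is a hypothetical stand-in for the bucket versioning
// state the handler fetches once per request.
type versioningConfig struct {
	enabled          bool
	suspended        bool
	excludedPrefixes []string
}

// PrefixEnabled reports whether versioning applies to this object, letting
// excluded prefixes opt out even when the bucket itself is versioned.
func (v versioningConfig) PrefixEnabled(object string) bool {
	if !v.enabled {
		return false
	}
	for _, p := range v.excludedPrefixes {
		if strings.HasPrefix(object, p) {
			return false
		}
	}
	return true
}

func (v versioningConfig) Suspended() bool { return v.suspended }

func main() {
	vc := versioningConfig{enabled: true, excludedPrefixes: []string{"tmp/"}}
	fmt.Println(vc.PrefixEnabled("tmp/scratch.bin")) // false
	fmt.Println(vc.PrefixEnabled("data/obj.bin"))    // true
}
```

Passing `vc.PrefixEnabled` as `PrefixEnabledFn` lets the object layer defer the decision until it knows each object's name, rather than baking one answer in up front.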
@@ -638,22 +645,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			continue
 		}

-		if replicateDeletes {
-			if dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending {
-				dv := DeletedObjectReplicationInfo{
-					DeletedObject: dobj,
-					Bucket:        bucket,
-				}
-				scheduleReplicationDelete(ctx, dv, objectAPI)
+		if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
+			dv := DeletedObjectReplicationInfo{
+				DeletedObject: dobj,
+				Bucket:        bucket,
 			}
-		}
-
-	}
-
-	// Notify deleted event for objects.
-	for _, dobj := range deletedObjects {
-		if dobj.ObjectName == "" {
-			continue
+			scheduleReplicationDelete(ctx, dv, objectAPI)
 		}

 		eventName := event.ObjectRemovedDelete
||||
@@ -706,13 +703,27 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectLockEnabled := false
|
||||
if vs, found := r.Header[http.CanonicalHeaderKey("x-amz-bucket-object-lock-enabled")]; found {
|
||||
v := strings.ToLower(strings.Join(vs, ""))
|
||||
if v != "true" && v != "false" {
|
||||
if vs := r.Header.Get(xhttp.AmzObjectLockEnabled); len(vs) > 0 {
|
||||
v := strings.ToLower(vs)
|
||||
switch v {
|
||||
case "true", "false":
|
||||
objectLockEnabled = v == "true"
|
||||
default:
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
forceCreate := false
|
||||
if vs := r.Header.Get(xhttp.MinIOForceCreate); len(vs) > 0 {
|
||||
v := strings.ToLower(vs)
|
||||
switch v {
|
||||
case "true", "false":
|
||||
forceCreate = v == "true"
|
||||
default:
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
objectLockEnabled = v == "true"
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
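Both header checks in the rewritten PutBucketHandler share one strict pattern: absent means default, "true"/"false" are accepted case-insensitively, and anything else fails the request outright. A minimal sketch of that parser as a standalone helper; `boolHeader` is illustrative, not a MinIO function.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"strings"
)

// boolHeader parses a header that must be exactly "true" or "false" when
// present, rejecting anything else, mirroring the strictness applied to the
// object-lock and force-create headers.
func boolHeader(h http.Header, name string) (value, present bool, err error) {
	vs := h.Get(name)
	if len(vs) == 0 {
		return false, false, nil // absent: caller keeps its default
	}
	switch strings.ToLower(vs) {
	case "true":
		return true, true, nil
	case "false":
		return false, true, nil
	default:
		return false, true, errors.New("invalid boolean header " + name)
	}
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Bucket-Object-Lock-Enabled", "TRUE")
	v, ok, err := boolHeader(h, "X-Amz-Bucket-Object-Lock-Enabled")
	fmt.Println(v, ok, err) // true true <nil>
}
```

Failing closed on malformed values keeps a typo like "ture" from silently creating a bucket without the lock the client asked for.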
@@ -737,6 +748,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	opts := BucketOptions{
 		Location:    location,
 		LockEnabled: objectLockEnabled,
+		ForceCreate: forceCreate,
 	}

 	if globalDNSConfig != nil {
@@ -794,19 +806,14 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	}

 	// Proceed to creating a bucket.
-	err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
-	if _, ok := err.(BucketExists); ok {
-		// Though bucket exists locally, we send the site-replication
-		// hook to ensure all sites have this bucket. If the hook
-		// succeeds, the client will still receive a bucket exists
-		// message.
-		err2 := globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
-		if err2 != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
-			return
-		}
-	}
-	if err != nil {
+	if err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
+		if _, ok := err.(BucketExists); ok {
+			// Though bucket exists locally, we send the site-replication
+			// hook to ensure all sites have this bucket. If the hook
+			// succeeds, the client will still receive a bucket exists
+			// message.
+			globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
+		}
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -815,8 +822,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

 	// Call site replication hook
-	err = globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
-	if err != nil {
+	if err := globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -1350,7 +1356,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
 	}
-	if !globalIsErasure {
+	if globalIsGateway {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
 		return
 	}
@@ -1374,7 +1380,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 	}

 	// Deny object locking configuration settings on existing buckets without object lock enabled.
-	if _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
+	if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -1427,7 +1433,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
 		return
 	}

-	config, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
+	config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
@@ -1529,7 +1535,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
 		return
 	}

-	config, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
+	config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
@@ -1582,375 +1588,3 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
|
||||
// Write success response.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
|
||||
// ----------
|
||||
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
|
||||
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutBucketReplicationConfig")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
|
||||
return
|
||||
}
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if globalSiteReplicationSys.isEnabled() {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
|
||||
return
|
||||
}
|
||||
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
|
||||
return
|
||||
}
|
||||
replicationConfig, err := replication.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
apiErr.Description = err.Error()
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig)
|
||||
if apiErr != noError {
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
// Validate the received bucket replication config
|
||||
if err = replicationConfig.Validate(bucket, sameTarget); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(replicationConfig)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
|
||||
// ----------
|
||||
// Gets the replication configuration for a bucket.
|
||||
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketReplicationConfig")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// check if user has permissions to perform this operation
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
|
||||
return
|
||||
}
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseXML(w, configData)
|
||||
}
|
||||
|
||||
// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
|
||||
// ----------
|
||||
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "DeleteBucketReplicationConfig")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
|
||||
return
|
||||
}
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if globalSiteReplicationSys.isEnabled() {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
|
||||
return
|
||||
}
|
||||
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, nil); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
|
||||
// ----------
|
||||
// Gets the replication metrics for a bucket.
|
||||
func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketReplicationMetrics")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// check if user has permissions to perform this operation
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
var usageInfo BucketUsageInfo
|
||||
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err == nil && !dataUsageInfo.LastUpdate.IsZero() {
|
||||
usageInfo = dataUsageInfo.BucketsUsage[bucket]
|
||||
}
|
||||
|
||||
w.Header().Set(xhttp.ContentType, string(mimeJSON))
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
if err = enc.Encode(getLatestReplicationStats(bucket, usageInfo)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}

// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
// the remote target is entirely lost, and previously replicated objects need to be re-synced. If a
// resync is already in progress it returns an error.
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ResetBucketReplicationStart")
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    durationStr := r.URL.Query().Get("older-than")
    arn := r.URL.Query().Get("arn")
    resetID := r.URL.Query().Get("reset-id")
    if resetID == "" {
        resetID = mustGetUUID()
    }
    var (
        days time.Duration
        err  error
    )
    if durationStr != "" {
        days, err = time.ParseDuration(durationStr)
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, InvalidArgument{
                Bucket: bucket,
                Err:    fmt.Errorf("invalid query parameter older-than %s for %s : %w", durationStr, bucket, err),
            }), r.URL)
            return
        }
    }
    resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/(24*time.Hour)))

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    if !config.HasActiveRules("", true) {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNoMatchingRuleError), r.URL)
        return
    }
    tgtArns := config.FilterTargetArns(
        replication.ObjectOpts{
            OpType:    replication.ResyncReplicationType,
            TargetArn: arn,
        })

    if len(tgtArns) == 0 {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
            Bucket: bucket,
            Err:    fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
        }), r.URL)
        return
    }

    if len(tgtArns) > 1 && arn == "" {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
            Bucket: bucket,
            Err:    fmt.Errorf("ARN should be specified for replication reset"),
        }), r.URL)
        return
    }
    var rinfo ResyncTargetsInfo
    target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArns[0])
    target.ResetBeforeDate = UTCNow().AddDate(0, 0, -1*int(days/(24*time.Hour)))
    target.ResetID = resetID
    rinfo.Targets = append(rinfo.Targets, ResyncTarget{Arn: tgtArns[0], ResetID: target.ResetID})
    if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
        switch err.(type) {
        case BucketRemoteConnectionErr:
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
        default:
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        }
        return
    }
    if err := startReplicationResync(ctx, bucket, arn, resetID, resetBeforeDate, objectAPI); err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
            Bucket: bucket,
            Err:    err,
        }), r.URL)
        return
    }

    data, err := json.Marshal(rinfo)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}
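
A note on the "older-than" query parameter used above: time.ParseDuration accepts no unit larger than hours, so a caller expresses a 7-day window as "168h" and the handler must turn the parsed Duration into a whole-day count for AddDate. A minimal, self-contained sketch of that conversion (values illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // "older-than" arrives in hours because ParseDuration has no day unit.
        days, err := time.ParseDuration("168h") // 7 days
        if err != nil {
            panic(err)
        }
        // Dividing one Duration by another yields the count: 168h / 24h = 7.
        resetBeforeDate := time.Now().UTC().AddDate(0, 0, -int(days/(24*time.Hour)))
        fmt.Println(resetBeforeDate) // an instant 7 days in the past, UTC
    }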

// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension.
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ResetBucketReplicationStatus")
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arn := r.URL.Query().Get("arn")
    var err error

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    globalReplicationPool.resyncState.RLock()
    brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
    if !ok {
        brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
        if err != nil {
            globalReplicationPool.resyncState.RUnlock()
            writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
                Bucket: bucket,
                Err:    fmt.Errorf("No replication resync status available for %s", arn),
            }), r.URL)
            return
        }
    }

    var rinfo ResyncTargetsInfo
    for tarn, st := range brs.TargetsMap {
        if arn != "" && tarn != arn {
            continue
        }
        rinfo.Targets = append(rinfo.Targets, ResyncTarget{
            Arn:             tarn,
            ResetID:         st.ResyncID,
            StartTime:       st.StartTime,
            EndTime:         st.EndTime,
            ResyncStatus:    st.ResyncStatus.String(),
            ReplicatedSize:  st.ReplicatedSize,
            ReplicatedCount: st.ReplicatedCount,
            FailedSize:      st.FailedSize,
            FailedCount:     st.FailedCount,
            Bucket:          st.Bucket,
            Object:          st.Object,
        })
    }
    globalReplicationPool.resyncState.RUnlock()
    data, err := json.Marshal(rinfo)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}

@@ -36,7 +36,8 @@ func TestRemoveBucketHandler(t *testing.T) {
}

func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    credentials auth.Credentials, t *testing.T,
) {
    _, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
    // if object upload fails stop the test.
    if err != nil {

@@ -34,7 +34,8 @@ func TestBucketLifecycleWrongCredentials(t *testing.T) {

// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    credentials auth.Credentials, t *testing.T,
) {
    // test cases with sample input and expected output.
    testCases := []struct {
        method string

@@ -247,11 +247,11 @@ func (t *transitionState) addLastDayStats(tier string, ts tierStats) {
    t.lastDayStats[tier].addStats(ts)
}

func (t *transitionState) getDailyAllTierStats() dailyAllTierStats {
func (t *transitionState) getDailyAllTierStats() DailyAllTierStats {
    t.lastDayMu.RLock()
    defer t.lastDayMu.RUnlock()

    res := make(dailyAllTierStats, len(t.lastDayStats))
    res := make(DailyAllTierStats, len(t.lastDayStats))
    for tier, st := range t.lastDayStats {
        res[tier] = st.clone()
    }

@@ -330,7 +330,7 @@ const (
// 2. when a transitioned object expires (based on an ILM rule).
func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *ObjectInfo, lcOpts lifecycle.ObjectOpts, action expireAction) error {
    var opts ObjectOptions
    opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
    opts.Versioned = globalBucketVersioningSys.PrefixEnabled(oi.Bucket, oi.Name)
    opts.VersionID = lcOpts.VersionID
    opts.Expiration = ExpirationOptions{Expire: true}
    switch action {

@@ -415,8 +415,8 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo)
            ETag: oi.ETag,
        },
        VersionID:        oi.VersionID,
        Versioned:        globalBucketVersioningSys.Enabled(oi.Bucket),
        VersionSuspended: globalBucketVersioningSys.Suspended(oi.Bucket),
        Versioned:        globalBucketVersioningSys.PrefixEnabled(oi.Bucket, oi.Name),
        VersionSuspended: globalBucketVersioningSys.PrefixSuspended(oi.Bucket, oi.Name),
        MTime:            oi.ModTime,
    }
    return tier, objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)

@@ -575,8 +575,8 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)

// postRestoreOpts returns ObjectOptions with version-id from the POST restore object request for a given bucket and object.
func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) {
    versioned := globalBucketVersioningSys.Enabled(bucket)
    versionSuspended := globalBucketVersioningSys.Suspended(bucket)
    versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
    versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
    vid := strings.TrimSpace(r.Form.Get(xhttp.VersionID))
    if vid != "" && vid != nullVersionID {
        _, err := uuid.Parse(vid)

@@ -627,8 +627,8 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
        meta[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
    }
    return ObjectOptions{
        Versioned:        globalBucketVersioningSys.Enabled(bucket),
        VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
        Versioned:        globalBucketVersioningSys.PrefixEnabled(bucket, object),
        VersionSuspended: globalBucketVersioningSys.PrefixSuspended(bucket, object),
        UserDefined:      meta,
    }
}

@@ -640,8 +640,8 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
    }

    return ObjectOptions{
        Versioned:        globalBucketVersioningSys.Enabled(bucket),
        VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
        Versioned:        globalBucketVersioningSys.PrefixEnabled(bucket, object),
        VersionSuspended: globalBucketVersioningSys.PrefixSuspended(bucket, object),
        UserDefined:      meta,
        VersionID:        objInfo.VersionID,
        MTime:            objInfo.ModTime,

@@ -203,7 +203,7 @@ func TestObjectIsRemote(t *testing.T) {
        if got := fi.IsRemote(); got != tc.remote {
            t.Fatalf("Test %d.a: expected %v got %v", i+1, tc.remote, got)
        }
        oi := fi.ToObjectInfo("bucket", "object")
        oi := fi.ToObjectInfo("bucket", "object", false)
        if got := oi.IsRemote(); got != tc.remote {
            t.Fatalf("Test %d.b: expected %v got %v", i+1, tc.remote, got)
        }

@@ -19,6 +19,7 @@ package cmd

import (
    "context"
    "fmt"
    "net/http"
    "strconv"
    "strings"
@@ -26,26 +27,9 @@ import (
    "github.com/gorilla/mux"
    "github.com/minio/minio/internal/logger"

    "github.com/minio/minio/internal/sync/errgroup"
    "github.com/minio/pkg/bucket/policy"
)

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
    g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
    for index := range objects {
        index := index
        g.Go(func() error {
            size, err := objects[index].GetActualSize()
            if err == nil {
                objects[index].Size = size
            }
            objects[index].ETag = objects[index].GetActualETag(nil)
            return nil
        }, index)
    }
    g.Wait()
}

// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args do not meet the required conditions.
// Special conditions required by MinIO server are as below

@@ -116,7 +100,11 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
        return
    }

    concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)
    if err = DecryptETags(ctx, GlobalKMS, listObjectVersionsInfo.Objects); err != nil {
        logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

@@ -178,7 +166,11 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
        return
    }

    concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
    if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects); err != nil {
        logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    // The next continuation token has id@node_index format to optimize paginated listing
    nextContinuationToken := listObjectsV2Info.NextContinuationToken

@@ -253,7 +245,11 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
        return
    }

    concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
    if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects); err != nil {
        logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
        delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,

@@ -350,7 +346,11 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
        return
    }

    concurrentDecryptETag(ctx, listObjectsInfo.Objects)
    if err = DecryptETags(ctx, GlobalKMS, listObjectsInfo.Objects); err != nil {
        logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

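All four list handlers above make the same replacement: the local concurrentDecryptETag helper is dropped in favor of the shared DecryptETags call against the global KMS. A hedged sketch of the shape they now share (the decryptListing wrapper below is hypothetical, not part of the change):

    // decryptListing decrypts SSE-KMS ETags in place and reports whether the
    // handler may continue building its listing response.
    func decryptListing(ctx context.Context, w http.ResponseWriter, r *http.Request, objects []ObjectInfo) bool {
        if err := DecryptETags(ctx, GlobalKMS, objects); err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
            return false
        }
        return true
    }
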
@@ -23,6 +23,7 @@ import (
    "errors"
    "fmt"
    "sync"
    "time"

    "github.com/minio/madmin-go"
    "github.com/minio/minio-go/v7/pkg/tags"
@@ -111,31 +112,29 @@ func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configF
    switch configFile {
    case bucketPolicyConfig:
        meta.PolicyConfigJSON = configData
        meta.PolicyConfigUpdatedAt = UTCNow()
    case bucketNotificationConfig:
        meta.NotificationConfigXML = configData
    case bucketLifecycleConfig:
        meta.LifecycleConfigXML = configData
    case bucketSSEConfig:
        meta.EncryptionConfigXML = configData
        meta.EncryptionConfigUpdatedAt = UTCNow()
    case bucketTaggingConfig:
        meta.TaggingConfigXML = configData
        meta.TaggingConfigUpdatedAt = UTCNow()
    case bucketQuotaConfigFile:
        meta.QuotaConfigJSON = configData
        meta.QuotaConfigUpdatedAt = UTCNow()
    case objectLockConfig:
        if !globalIsErasure && !globalIsDistErasure {
            return NotImplemented{}
        }
        meta.ObjectLockConfigXML = configData
        meta.ObjectLockConfigUpdatedAt = UTCNow()
    case bucketVersioningConfig:
        if !globalIsErasure && !globalIsDistErasure {
            return NotImplemented{}
        }
        meta.VersioningConfigXML = configData
        meta.VersioningConfigUpdatedAt = UTCNow()
    case bucketReplicationConfig:
        if !globalIsErasure && !globalIsDistErasure {
            return NotImplemented{}
        }
        meta.ReplicationConfigXML = configData
        meta.ReplicationConfigUpdatedAt = UTCNow()
    case bucketTargetsFile:
        meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, kms.Context{
            bucket: meta.Name,
@@ -186,44 +185,47 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {

// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        return nil, err
        if errors.Is(err, errConfigNotFound) {
            return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, meta.Created, nil
        }
        return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, time.Time{}, err
    }
    return meta.versioningConfig, nil
    return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil
}
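
The accessor change above sets the pattern for the rest of this file: each Get*Config method gains a time.Time return carrying the config's last-update timestamp, and callers that do not need it discard it with a blank identifier (as the PolicySys and BucketQuotaSys call sites later in this diff do). A hedged sketch of the new call shape (bucket name illustrative):

    vcfg, updatedAt, err := globalBucketMetadataSys.GetVersioningConfig("mybucket")
    if err != nil {
        // Note the lenient contract here: a missing config is not an error; it
        // comes back as an empty Versioning value with the bucket creation time.
        return err
    }
    _ = updatedAt // callers needing only the config drop the timestamp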

// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketTaggingNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.taggingConfig == nil {
        return nil, BucketTaggingNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
    }
    return meta.taggingConfig, nil
    return meta.taggingConfig, meta.TaggingConfigUpdatedAt, nil
}

// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.objectLockConfig == nil {
        return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
    }
    return meta.objectLockConfig, nil
    return meta.objectLockConfig, meta.ObjectLockConfigUpdatedAt, nil
}

// GetLifecycleConfig returns configured lifecycle config
@@ -283,69 +285,82 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi

// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketSSEConfigNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.sseConfig == nil {
        return nil, BucketSSEConfigNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
    }
    return meta.sseConfig, nil
    return meta.sseConfig, meta.EncryptionConfigUpdatedAt, nil
}

// CreatedAt returns the time of creation of bucket
func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        return time.Time{}, err
    }
    return meta.Created.UTC(), nil
}

// GetPolicyConfig returns configured bucket policy
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, time.Time, error) {
    if globalIsGateway {
        objAPI := newObjectLayerFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
            return nil, time.Time{}, errServerNotInitialized
        }
        return objAPI.GetBucketPolicy(GlobalContext, bucket)
        p, err := objAPI.GetBucketPolicy(GlobalContext, bucket)
        return p, UTCNow(), err
    }

    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketPolicyNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.policyConfig == nil {
        return nil, BucketPolicyNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
    }
    return meta.policyConfig, nil
    return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil
}

// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, error) {
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, time.Time, error) {
    meta, err := sys.GetConfig(ctx, bucket)
    if err != nil {
        return nil, err
        if errors.Is(err, errConfigNotFound) {
            return nil, time.Time{}, BucketQuotaConfigNotFound{Bucket: bucket}
        }
        return nil, time.Time{}, err
    }
    return meta.quotaConfig, nil
    return meta.quotaConfig, meta.QuotaConfigUpdatedAt, nil
}

// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, time.Time, error) {
    meta, err := sys.GetConfig(ctx, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketReplicationConfigNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }

    if meta.replicationConfig == nil {
        return nil, BucketReplicationConfigNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
    }
    return meta.replicationConfig, nil
    return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil
}

// GetBucketTargetsConfig returns configured bucket targets for this bucket
@@ -353,6 +368,9 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketRemoteTargetNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.bucketTargetConfig == nil {
@@ -361,20 +379,6 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
    return meta.bucketTargetConfig, nil
}

// GetBucketTarget returns the target for the bucket and arn.
func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin.BucketTarget, error) {
    targets, err := sys.GetBucketTargetsConfig(bucket)
    if err != nil {
        return madmin.BucketTarget{}, err
    }
    for _, t := range targets.Targets {
        if t.Arn == arn {
            return t, nil
        }
    }
    return madmin.BucketTarget{}, errConfigNotFound
}

// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (BucketMetadata, error) {

@@ -81,6 +81,13 @@ type BucketMetadata struct {
    ReplicationConfigXML        []byte
    BucketTargetsConfigJSON     []byte
    BucketTargetsConfigMetaJSON []byte
    PolicyConfigUpdatedAt       time.Time
    ObjectLockConfigUpdatedAt   time.Time
    EncryptionConfigUpdatedAt   time.Time
    TaggingConfigUpdatedAt      time.Time
    QuotaConfigUpdatedAt        time.Time
    ReplicationConfigUpdatedAt  time.Time
    VersioningConfigUpdatedAt   time.Time

    // Unexported fields. Must be updated atomically.
    policyConfig *policy.Policy
@@ -98,9 +105,10 @@ type BucketMetadata struct {

// newBucketMetadata creates BucketMetadata with the supplied name and Created to Now.
func newBucketMetadata(name string) BucketMetadata {
    now := UTCNow()
    return BucketMetadata{
        Name:    name,
        Created: UTCNow(),
        Created: now,
        notificationConfig: &event.Config{
            XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
        },
@@ -157,8 +165,13 @@ func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket strin
    if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil {
        return b, err
    }

    // migrate unencrypted remote targets
    return b, b.migrateTargetConfig(ctx, objectAPI)
    if err = b.migrateTargetConfig(ctx, objectAPI); err != nil {
        return b, err
    }
    b.defaultTimestamps()
    return b, nil
}

// parseAllConfigs will parse all configs and populate the private fields.
@@ -347,6 +360,37 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
    return nil
}

// default timestamps to metadata Created timestamp if unset.
func (b *BucketMetadata) defaultTimestamps() {
    if b.PolicyConfigUpdatedAt.IsZero() {
        b.PolicyConfigUpdatedAt = b.Created
    }

    if b.EncryptionConfigUpdatedAt.IsZero() {
        b.EncryptionConfigUpdatedAt = b.Created
    }

    if b.TaggingConfigUpdatedAt.IsZero() {
        b.TaggingConfigUpdatedAt = b.Created
    }

    if b.ObjectLockConfigUpdatedAt.IsZero() {
        b.ObjectLockConfigUpdatedAt = b.Created
    }

    if b.QuotaConfigUpdatedAt.IsZero() {
        b.QuotaConfigUpdatedAt = b.Created
    }

    if b.ReplicationConfigUpdatedAt.IsZero() {
        b.ReplicationConfigUpdatedAt = b.Created
    }

    if b.VersioningConfigUpdatedAt.IsZero() {
        b.VersioningConfigUpdatedAt = b.Created
    }
}
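
defaultTimestamps exists for upgrades: metadata written before this change decodes with zero-valued *UpdatedAt fields, and backfilling them with the bucket's creation time keeps the timestamps meaningful. A small illustration of the effect (bucket name illustrative):

    b := newBucketMetadata("mybucket")
    b.PolicyConfigUpdatedAt = time.Time{} // simulate metadata written before these fields existed
    b.defaultTimestamps()
    fmt.Println(b.PolicyConfigUpdatedAt.Equal(b.Created)) // true: falls back to Created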

// Save config to supplied ObjectLayer api.
func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
    if err := b.parseAllConfigs(ctx, api); err != nil {

@@ -108,6 +108,48 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
            err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
            return
        }
    case "PolicyConfigUpdatedAt":
        z.PolicyConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
            return
        }
    case "ObjectLockConfigUpdatedAt":
        z.ObjectLockConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
            return
        }
    case "EncryptionConfigUpdatedAt":
        z.EncryptionConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
            return
        }
    case "TaggingConfigUpdatedAt":
        z.TaggingConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
            return
        }
    case "QuotaConfigUpdatedAt":
        z.QuotaConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
            return
        }
    case "ReplicationConfigUpdatedAt":
        z.ReplicationConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
            return
        }
    case "VersioningConfigUpdatedAt":
        z.VersioningConfigUpdatedAt, err = dc.ReadTime()
        if err != nil {
            err = msgp.WrapError(err, "VersioningConfigUpdatedAt")
            return
        }
    default:
        err = dc.Skip()
        if err != nil {
@@ -121,9 +163,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
    // map header, size 14
    // map header, size 21
    // write "Name"
    err = en.Append(0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    err = en.Append(0xde, 0x0, 0x15, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    if err != nil {
        return
    }
@@ -262,15 +304,85 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
        err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
        return
    }
    // write "PolicyConfigUpdatedAt"
    err = en.Append(0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.PolicyConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
        return
    }
    // write "ObjectLockConfigUpdatedAt"
    err = en.Append(0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.ObjectLockConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
        return
    }
    // write "EncryptionConfigUpdatedAt"
    err = en.Append(0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.EncryptionConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
        return
    }
    // write "TaggingConfigUpdatedAt"
    err = en.Append(0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.TaggingConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
        return
    }
    // write "QuotaConfigUpdatedAt"
    err = en.Append(0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.QuotaConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
        return
    }
    // write "ReplicationConfigUpdatedAt"
    err = en.Append(0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.ReplicationConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
        return
    }
    // write "VersioningConfigUpdatedAt"
    err = en.Append(0xb9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.VersioningConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "VersioningConfigUpdatedAt")
        return
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    // map header, size 14
    // map header, size 21
    // string "Name"
    o = append(o, 0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    o = append(o, 0xde, 0x0, 0x15, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    o = msgp.AppendString(o, z.Name)
    // string "Created"
    o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
@@ -311,6 +423,27 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
    // string "BucketTargetsConfigMetaJSON"
    o = append(o, 0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
    o = msgp.AppendBytes(o, z.BucketTargetsConfigMetaJSON)
    // string "PolicyConfigUpdatedAt"
    o = append(o, 0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.PolicyConfigUpdatedAt)
    // string "ObjectLockConfigUpdatedAt"
    o = append(o, 0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.ObjectLockConfigUpdatedAt)
    // string "EncryptionConfigUpdatedAt"
    o = append(o, 0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.EncryptionConfigUpdatedAt)
    // string "TaggingConfigUpdatedAt"
    o = append(o, 0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.TaggingConfigUpdatedAt)
    // string "QuotaConfigUpdatedAt"
    o = append(o, 0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.QuotaConfigUpdatedAt)
    // string "ReplicationConfigUpdatedAt"
    o = append(o, 0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.ReplicationConfigUpdatedAt)
    // string "VersioningConfigUpdatedAt"
    o = append(o, 0xb9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.VersioningConfigUpdatedAt)
    return
}

@@ -416,6 +549,48 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
            err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
            return
        }
    case "PolicyConfigUpdatedAt":
        z.PolicyConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
            return
        }
    case "ObjectLockConfigUpdatedAt":
        z.ObjectLockConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
            return
        }
    case "EncryptionConfigUpdatedAt":
        z.EncryptionConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
            return
        }
    case "TaggingConfigUpdatedAt":
        z.TaggingConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
            return
        }
    case "QuotaConfigUpdatedAt":
        z.QuotaConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
            return
        }
    case "ReplicationConfigUpdatedAt":
        z.ReplicationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
            return
        }
    case "VersioningConfigUpdatedAt":
        z.VersioningConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, "VersioningConfigUpdatedAt")
            return
        }
    default:
        bts, err = msgp.Skip(bts)
        if err != nil {
@@ -430,6 +605,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) {
    s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON)
    s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize
    return
}
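
The regenerated byte constants follow MessagePack's map encoding: 14 fields fit a fixmap, whose single header byte is 0x80|count (hence 0x8e), while 21 fields exceed the fixmap limit of 15 and require a map16 header of 0xde plus a big-endian uint16 count (hence 0xde, 0x00, 0x15). That is also why the leading constant in Msgsize grows from 1 to 3: it accounts for the header bytes. A quick check of the arithmetic:

    package main

    import "fmt"

    func main() {
        // fixmap: the low nibble of the header byte carries the field count.
        fmt.Println(0x8e & 0x0f) // 14
        // map16: the count is the big-endian uint16 that follows 0xde.
        fmt.Println(0x00<<8 | 0x15) // 21
    }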

@@ -44,7 +44,7 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
        return r, nil
    }

    config, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
    config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
    if err != nil {
        if _, ok := err.(BucketObjectLockConfigNotFound); ok {
            return r, nil

@@ -113,7 +113,8 @@ func TestCreateBucket(t *testing.T) {

// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    credentials auth.Credentials, t *testing.T,
) {
    bucketName1 := fmt.Sprintf("%s-1", bucketName)

    const n = 100

@@ -378,7 +379,8 @@ func TestGetBucketPolicyHandler(t *testing.T) {

// testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    credentials auth.Credentials, t *testing.T,
) {
    // template for constructing HTTP request body for PUT bucket policy.
    bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}`

@@ -38,7 +38,8 @@ type PolicySys struct{}

// Get returns stored bucket policy
func (sys *PolicySys) Get(bucket string) (*policy.Policy, error) {
    return globalBucketMetadataSys.GetPolicyConfig(bucket)
    policy, _, err := globalBucketMetadataSys.GetPolicyConfig(bucket)
    return policy, err
}

// IsAllowed - checks given policy args is allowed to continue the Rest API.

@@ -42,8 +42,8 @@ func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.
        }
        return &madmin.BucketQuota{}, nil
    }

    return globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
    qCfg, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
    return qCfg, err
}

// NewBucketQuotaSys returns initialized BucketQuotaSys

404 cmd/bucket-replication-handlers.go Normal file
@@ -0,0 +1,404 @@
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
    "encoding/json"
    "encoding/xml"
    "fmt"
    "io"
    "net/http"
    "time"

    "github.com/gorilla/mux"
    "github.com/minio/minio/internal/bucket/replication"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/pkg/bucket/policy"
)

// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
// ----------
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketReplicationConfig")
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }
    if globalIsGateway {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }
    if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }
    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
        return
    }
    replicationConfig, err := replication.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
    if err != nil {
        apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
        apiErr.Description = err.Error()
        writeErrorResponse(ctx, w, apiErr, r.URL)
        return
    }
    sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, true)
    if apiErr != noError {
        writeErrorResponse(ctx, w, apiErr, r.URL)
        return
    }
    // Validate the received bucket replication config
    if err = replicationConfig.Validate(bucket, sameTarget); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    configData, err := xml.Marshal(replicationConfig)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    if err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    // Write success response.
    writeSuccessResponseHeadersOnly(w)
}
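
For reference, this handler is reached through the standard S3 PutBucketReplication route (a PUT on the bucket with the replication query). A hedged client-side sketch — endpoint and bucket are illustrative, and the AWS SigV4 signing a real request needs is omitted:

    package main

    import (
        "bytes"
        "log"
        "net/http"
    )

    func main() {
        // Rules XML in the shape replication.ParseConfig accepts above.
        cfg := []byte(`<ReplicationConfiguration>...</ReplicationConfiguration>`)
        req, err := http.NewRequest(http.MethodPut,
            "https://minio.example.com/mybucket?replication", bytes.NewReader(cfg))
        if err != nil {
            log.Fatal(err)
        }
        req.Header.Set("Content-Type", "application/xml")
        // Sign the request (SigV4) here before sending it.
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println(resp.Status) // expect 200 OK once the config is stored
    }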

// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
// ----------
// Gets the replication configuration for a bucket.
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketReplicationConfig")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    // check if user has permissions to perform this operation
    if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }
    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    configData, err := xml.Marshal(config)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    // Write success response.
    writeSuccessResponseXML(w, configData)
}

// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
// ----------
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DeleteBucketReplicationConfig")
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }
    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    if globalSiteReplicationSys.isEnabled() {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
        return
    }
    if err := globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, nil); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    // Write success response.
    writeSuccessResponseHeadersOnly(w)
}

// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
// ----------
// Gets the replication metrics for a bucket.
func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketReplicationMetrics")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    // check if user has permissions to perform this operation
    if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    var usageInfo BucketUsageInfo
    dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
    if err == nil && !dataUsageInfo.LastUpdate.IsZero() {
        usageInfo = dataUsageInfo.BucketsUsage[bucket]
    }

    w.Header().Set(xhttp.ContentType, string(mimeJSON))

    enc := json.NewEncoder(w)
    if err = enc.Encode(getLatestReplicationStats(bucket, usageInfo)); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
}
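
The metrics handler writes one JSON document produced by getLatestReplicationStats. A hedged fetch-and-decode sketch — the ?replication-metrics query string is an assumption about the route registration (which is outside this diff), signing is again omitted, and the target type follows the stats code later in this diff:

    resp, err := http.Get("https://minio.example.com/mybucket?replication-metrics")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    var stats BucketReplicationStats
    if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
        log.Fatal(err)
    }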

// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
// the remote target is entirely lost, and previously replicated objects need to be re-synced. If a
// resync is already in progress it returns an error.
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ResetBucketReplicationStart")
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    durationStr := r.URL.Query().Get("older-than")
    arn := r.URL.Query().Get("arn")
    resetID := r.URL.Query().Get("reset-id")
    if resetID == "" {
        resetID = mustGetUUID()
    }
    var (
        days time.Duration
        err  error
    )
    if durationStr != "" {
        days, err = time.ParseDuration(durationStr)
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, InvalidArgument{
                Bucket: bucket,
                Err:    fmt.Errorf("invalid query parameter older-than %s for %s : %w", durationStr, bucket, err),
            }), r.URL)
            return
        }
    }
    resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/(24*time.Hour)))

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    if !config.HasExistingObjectReplication(arn) {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNoExistingObjects), r.URL)
        return
    }

    tgtArns := config.FilterTargetArns(
        replication.ObjectOpts{
            OpType:    replication.ResyncReplicationType,
            TargetArn: arn,
        })

    if len(tgtArns) == 0 {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
            Bucket: bucket,
            Err:    fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
        }), r.URL)
        return
    }

    if len(tgtArns) > 1 && arn == "" {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
            Bucket: bucket,
            Err:    fmt.Errorf("ARN should be specified for replication reset"),
        }), r.URL)
        return
    }
    var rinfo ResyncTargetsInfo
    target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArns[0])
    target.ResetBeforeDate = UTCNow().AddDate(0, 0, -1*int(days/(24*time.Hour)))
    target.ResetID = resetID
    rinfo.Targets = append(rinfo.Targets, ResyncTarget{Arn: tgtArns[0], ResetID: target.ResetID})
    if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
        switch err.(type) {
        case BucketRemoteConnectionErr:
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
        default:
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        }
        return
    }
    if err := startReplicationResync(ctx, bucket, arn, resetID, resetBeforeDate, objectAPI); err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
            Bucket: bucket,
            Err:    err,
        }), r.URL)
        return
    }

    data, err := json.Marshal(rinfo)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}

// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension.
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ResetBucketReplicationStatus")
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arn := r.URL.Query().Get("arn")
    var err error

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if _, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    globalReplicationPool.resyncState.RLock()
    brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
    globalReplicationPool.resyncState.RUnlock()
    if !ok {
        brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
        if err != nil {
            writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
                Bucket: bucket,
                Err:    fmt.Errorf("No replication resync status available for %s", arn),
            }), r.URL)
            return
        }
    }

    var rinfo ResyncTargetsInfo
    for tarn, st := range brs.TargetsMap {
        if arn != "" && tarn != arn {
            continue
        }
        rinfo.Targets = append(rinfo.Targets, ResyncTarget{
            Arn:             tarn,
            ResetID:         st.ResyncID,
            StartTime:       st.StartTime,
            EndTime:         st.EndTime,
            ResyncStatus:    st.ResyncStatus.String(),
            ReplicatedSize:  st.ReplicatedSize,
            ReplicatedCount: st.ReplicatedCount,
            FailedSize:      st.FailedSize,
            FailedCount:     st.FailedCount,
            Bucket:          st.Bucket,
            Object:          st.Object,
        })
    }
    data, err := json.Marshal(rinfo)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    // Write success response.
    writeSuccessResponseJSON(w, data)
}

@@ -135,6 +135,23 @@ func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats
	return st.Clone()
}

// GetAll returns replication metrics for all buckets at once.
func (r *ReplicationStats) GetAll() map[string]BucketReplicationStats {
	if r == nil {
		return map[string]BucketReplicationStats{}
	}

	r.RLock()
	defer r.RUnlock()

	bucketReplicationStats := make(map[string]BucketReplicationStats, len(r.Cache))
	for k, v := range r.Cache {
		bucketReplicationStats[k] = v.Clone()
	}

	return bucketReplicationStats
}

// Get replication metrics for a bucket from this node since this node came up.
func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
	if r == nil {
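Note: GetAll clones every per-bucket entry while holding only the read lock, so a caller gets a consistent snapshot it can serialize or aggregate without blocking writers. A minimal usage sketch (a fragment, assuming a populated *ReplicationStats named stats; that variable name is illustrative, not from the diff):

    // Take one consistent snapshot, then work on it lock-free.
    snapshot := stats.GetAll()
    for bucket, st := range snapshot {
        fmt.Printf("%s: %d bytes replicated\n", bucket, st.ReplicatedSize)
    }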
@@ -161,7 +178,7 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio

// load replication metrics at cluster start from initial data usage
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
	rTimer := time.NewTimer(time.Minute * 1)
	rTimer := time.NewTimer(time.Minute)
	defer rTimer.Stop()
	var (
		dui DataUsageInfo
@@ -174,13 +191,12 @@ outer:
			return
		case <-rTimer.C:
			dui, err = loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
			if err != nil {
				continue
			}
			// If LastUpdate is set, data usage is available.
			if !dui.LastUpdate.IsZero() {
			if err == nil && !dui.LastUpdate.IsZero() {
				break outer
			}

			rTimer.Reset(time.Minute)
		}
	}

@@ -509,7 +509,7 @@ func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) Repl
				ObjectName: oi.Name,
				VersionID:  oi.VersionID,
			},
		}, oi, ObjectOptions{}, nil)
		}, oi, ObjectOptions{VersionSuspended: globalBucketVersioningSys.PrefixSuspended(oi.Bucket, oi.Name)}, nil)
	} else {
		dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(ObjectInfo{
			UserDefined: oi.UserDefined,

@@ -81,12 +81,13 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati
		return rc, BucketReplicationConfigNotFound{Bucket: bucketName}
	}

	return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
	rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
	return rCfg, err
}

// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, APIError) {
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config, checkRemote bool) (bool, APIError) {
	var arns []string
	if rCfg.RoleArn != "" {
		arns = append(arns, rCfg.RoleArn)
@@ -95,26 +96,29 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
			arns = append(arns, rule.Destination.String())
		}
	}
	var sameTarget bool
	for _, arnStr := range arns {
		arn, err := madmin.ParseARN(arnStr)
		if err != nil {
			return false, errorCodes.ToAPIErrWithErr(ErrBucketRemoteArnInvalid, err)
			return sameTarget, errorCodes.ToAPIErrWithErr(ErrBucketRemoteArnInvalid, err)
		}
		if arn.Type != madmin.ReplicationService {
			return false, toAPIError(ctx, BucketRemoteArnTypeInvalid{Bucket: bucket})
			return sameTarget, toAPIError(ctx, BucketRemoteArnTypeInvalid{Bucket: bucket})
		}
		clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arnStr)
		if clnt == nil {
			return false, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})
			return sameTarget, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})
		}
		if found, err := clnt.BucketExists(ctx, arn.Bucket); !found {
			return false, errorCodes.ToAPIErrWithErr(ErrRemoteDestinationNotFoundError, err)
		}
		if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
			if ret.LockEnabled {
				lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, arn.Bucket)
				if err != nil || lock != "Enabled" {
					return false, errorCodes.ToAPIErrWithErr(ErrReplicationDestinationMissingLock, err)
		if checkRemote { // validate remote bucket
			if found, err := clnt.BucketExists(ctx, arn.Bucket); !found {
				return sameTarget, errorCodes.ToAPIErrWithErr(ErrRemoteDestinationNotFoundError, err)
			}
			if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
				if ret.LockEnabled {
					lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, arn.Bucket)
					if err != nil || lock != "Enabled" {
						return sameTarget, errorCodes.ToAPIErrWithErr(ErrReplicationDestinationMissingLock, err)
					}
				}
			}
		}
@@ -122,12 +126,19 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
		c, ok := globalBucketTargetSys.arnRemotesMap[arnStr]
		if ok {
			if c.EndpointURL().String() == clnt.EndpointURL().String() {
				sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
				return sameTarget, toAPIError(ctx, nil)
				selfTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
				if !sameTarget {
					sameTarget = selfTarget
				}
				continue
			}
		}
	}
	return false, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})

	if len(arns) == 0 {
		return false, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})
	}
	return sameTarget, toAPIError(ctx, nil)
}

type mustReplicateOptions struct {
@@ -179,6 +190,17 @@ func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplica
		return
	}

	// object layer not initialized we return with no decision.
	if newObjectLayerFn() == nil {
		return
	}

	// Disable server-side replication on object prefixes which are excluded
	// from versioning via the MinIO bucket versioning extension.
	if globalBucketVersioningSys.PrefixSuspended(bucket, object) {
		return
	}

	replStatus := mopts.ReplicationStatus()
	if replStatus == replication.Replica && !mopts.isMetadataReplication() {
		return
@@ -263,6 +285,11 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
	if delOpts.ReplicationRequest {
		return
	}
	// Skip replication if this object's prefix is excluded from being
	// versioned.
	if !delOpts.Versioned {
		return
	}
	opts := replication.ObjectOpts{
		Name: dobj.ObjectName,
		SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
@@ -425,7 +452,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
		wg.Add(1)
		go func(index int, tgt *TargetClient) {
			defer wg.Done()
			rinfo := replicateDeleteToTarget(ctx, dobj, objectAPI, tgt)
			rinfo := replicateDeleteToTarget(ctx, dobj, tgt)
			rinfos.Targets[index] = rinfo
		}(idx, tgt)
	}
@@ -456,8 +483,8 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
		VersionID:         versionID,
		MTime:             dobj.DeleteMarkerMTime.Time,
		DeleteReplication: drs,
		Versioned:         globalBucketVersioningSys.Enabled(bucket),
		VersionSuspended:  globalBucketVersioningSys.Suspended(bucket),
		Versioned:         globalBucketVersioningSys.PrefixEnabled(bucket, dobj.ObjectName),
		VersionSuspended:  globalBucketVersioningSys.PrefixSuspended(bucket, dobj.ObjectName),
	})
	if err != nil && !isErrVersionNotFound(err) { // VersionNotFound would be reported by pool that object version is missing on.
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %s", bucket, dobj.ObjectName, versionID, err))
@@ -482,7 +509,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
	}
}

func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationInfo, objectAPI ObjectLayer, tgt *TargetClient) (rinfo replicatedTargetInfo) {
func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationInfo, tgt *TargetClient) (rinfo replicatedTargetInfo) {
	versionID := dobj.DeleteMarkerVersionID
	if versionID == "" {
		versionID = dobj.VersionID
@@ -1018,8 +1045,13 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
		return
	}

	versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
	versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)

	gr, err = objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
		VersionID: objInfo.VersionID,
		VersionID:        objInfo.VersionID,
		Versioned:        versioned,
		VersionSuspended: versionSuspended,
	})
	if err != nil {
		sendEvent(eventArgs{
@@ -1589,7 +1621,11 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
	return reader, proxyResult{Proxy: true}, nil
}

func getproxyTargets(ctx context.Context, bucket, object string, opts ObjectOptions) (tgts *madmin.BucketTargets) {
func getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOptions) (tgts *madmin.BucketTargets) {
	if opts.VersionSuspended {
		return &madmin.BucketTargets{}
	}

	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil || cfg == nil {
		return &madmin.BucketTargets{}
@@ -1726,10 +1762,6 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli
	if c.Empty() {
		return
	}
	// existing object replication does not apply to un-versioned objects
	if oi.VersionID == "" {
		return
	}

	// Now overlay existing object replication choices for target
	if oi.DeleteMarker {
@@ -1830,9 +1862,26 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
	return
}

// get the most current of in-memory replication stats and data usage info from crawler.
func getLatestReplicationStats(bucket string, u BucketUsageInfo) (s BucketReplicationStats) {
	bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
func getAllLatestReplicationStats(bucketsUsage map[string]BucketUsageInfo) (bucketsReplicationStats map[string]BucketReplicationStats) {
	peerBucketStatsList := globalNotificationSys.GetClusterAllBucketStats(GlobalContext)
	bucketsReplicationStats = make(map[string]BucketReplicationStats, len(bucketsUsage))

	for bucket, u := range bucketsUsage {
		bucketStats := make([]BucketStats, len(peerBucketStatsList))
		for i, peerBucketStats := range peerBucketStatsList {
			bucketStat, ok := peerBucketStats[bucket]
			if !ok {
				continue
			}
			bucketStats[i] = bucketStat
		}
		bucketsReplicationStats[bucket] = calculateBucketReplicationStats(bucket, u, bucketStats)
	}

	return bucketsReplicationStats
}

func calculateBucketReplicationStats(bucket string, u BucketUsageInfo, bucketStats []BucketStats) (s BucketReplicationStats) {
	// accumulate cluster bucket stats
	stats := make(map[string]*BucketReplicationStat)
	var totReplicaSize int64
@@ -1901,6 +1950,12 @@ func getLatestReplicationStats(bucket string, u BucketUsageInfo) (s BucketReplic
	return s
}

// get the most current of in-memory replication stats and data usage info from crawler.
func getLatestReplicationStats(bucket string, u BucketUsageInfo) (s BucketReplicationStats) {
	bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
	return calculateBucketReplicationStats(bucket, u, bucketStats)
}
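Note: this refactor makes one GetClusterAllBucketStats round trip to the peers and aggregates locally, instead of one GetClusterBucketStats call per bucket, and it hoists the aggregation math into calculateBucketReplicationStats so the single-bucket path stays consistent with the all-bucket path. A rough sketch of the intended call pattern (dui as a DataUsageInfo is an assumed caller-side variable; usage is illustrative):

    // One cluster-wide fetch, then purely local per-bucket aggregation.
    allStats := getAllLatestReplicationStats(dui.BucketsUsage)
    for bucket, st := range allStats {
        _ = st // e.g. feed per-bucket replication metrics to Prometheus
    }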

const resyncTimeInterval = time.Minute * 10

// periodicResyncMetaSave saves in-memory resync meta stats to disk in periodic intervals
@@ -1911,7 +1966,6 @@ func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI
	for {
		select {
		case <-resyncTimer.C:
			resyncTimer.Reset(resyncTimeInterval)
			now := UTCNow()
			p.resyncState.RLock()
			for bucket, brs := range p.resyncState.statusMap {
@@ -1932,6 +1986,8 @@ func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI
				}
			}
			p.resyncState.RUnlock()

			resyncTimer.Reset(resyncTimeInterval)
		case <-ctx.Done():
			// server could be restarting - need
			// to exit immediately
@@ -1951,6 +2007,7 @@ func resyncBucket(ctx context.Context, bucket, arn string, heal bool, objectAPI
		st.EndTime = UTCNow()
		st.ResyncStatus = resyncStatus
		m.TargetsMap[arn] = st
		globalReplicationPool.resyncState.statusMap[bucket] = m
		globalReplicationPool.resyncState.Unlock()
	}()
	// Allocate new results channel to receive ObjectInfo.
@@ -2011,7 +2068,6 @@ func resyncBucket(ctx context.Context, bucket, arn string, heal bool, objectAPI
		}

		if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {

			versionID := ""
			dmVersionID := ""
			if roi.VersionPurgeStatus.Empty() {
@@ -2058,6 +2114,7 @@ func resyncBucket(ctx context.Context, bucket, arn string, heal bool, objectAPI
				st.ReplicatedSize += roi.Size
			}
			m.TargetsMap[arn] = st
			globalReplicationPool.resyncState.statusMap[bucket] = m
			globalReplicationPool.resyncState.Unlock()
		}
		resyncStatus = ResyncCompleted
@@ -2138,10 +2195,6 @@ func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo,
	if objAPI == nil {
		return errServerNotInitialized
	}
	// replication applies only to erasure coded setups
	if !globalIsErasure {
		return nil
	}
	// Load bucket metadata sys in background
	go p.loadResync(ctx, buckets, objAPI)
	return nil

@@ -51,6 +51,9 @@ func (rl *ReplicationLatency) update(size int64, duration time.Duration) {
	rl.UploadHistogram.Add(size, duration)
}

// BucketStatsMap captures bucket statistics for all buckets
type BucketStatsMap map[string]BucketStats

// BucketStats bucket statistics
type BucketStats struct {
	ReplicationStats BucketReplicationStats

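Note: the BucketStatsMap methods in the next hunk (DecodeMsg, EncodeMsg, MarshalMsg, UnmarshalMsg, Msgsize) are serialization code generated by the tinylib/msgp tool, not hand-written; they get regenerated whenever the type changes, via a directive along these lines at the top of the source file (the exact flags are an assumption):

    //go:generate msgp -file $GOFILE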
@@ -792,6 +792,183 @@ func (z *BucketStats) Msgsize() (s int) {
	return
}

// DecodeMsg implements msgp.Decodable
func (z *BucketStatsMap) DecodeMsg(dc *msgp.Reader) (err error) {
	var zb0003 uint32
	zb0003, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if (*z) == nil {
		(*z) = make(BucketStatsMap, zb0003)
	} else if len((*z)) > 0 {
		for key := range *z {
			delete((*z), key)
		}
	}
	for zb0003 > 0 {
		zb0003--
		var zb0001 string
		var zb0002 BucketStats
		zb0001, err = dc.ReadString()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		var field []byte
		_ = field
		var zb0004 uint32
		zb0004, err = dc.ReadMapHeader()
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
		for zb0004 > 0 {
			zb0004--
			field, err = dc.ReadMapKeyPtr()
			if err != nil {
				err = msgp.WrapError(err, zb0001)
				return
			}
			switch msgp.UnsafeString(field) {
			case "ReplicationStats":
				err = zb0002.ReplicationStats.DecodeMsg(dc)
				if err != nil {
					err = msgp.WrapError(err, zb0001, "ReplicationStats")
					return
				}
			default:
				err = dc.Skip()
				if err != nil {
					err = msgp.WrapError(err, zb0001)
					return
				}
			}
		}
		(*z)[zb0001] = zb0002
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z BucketStatsMap) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteMapHeader(uint32(len(z)))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0005, zb0006 := range z {
		err = en.WriteString(zb0005)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		// map header, size 1
		// write "ReplicationStats"
		err = en.Append(0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
		if err != nil {
			return
		}
		err = zb0006.ReplicationStats.EncodeMsg(en)
		if err != nil {
			err = msgp.WrapError(err, zb0005, "ReplicationStats")
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z BucketStatsMap) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendMapHeader(o, uint32(len(z)))
	for zb0005, zb0006 := range z {
		o = msgp.AppendString(o, zb0005)
		// map header, size 1
		// string "ReplicationStats"
		o = append(o, 0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
		o, err = zb0006.ReplicationStats.MarshalMsg(o)
		if err != nil {
			err = msgp.WrapError(err, zb0005, "ReplicationStats")
			return
		}
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketStatsMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0003 uint32
	zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if (*z) == nil {
		(*z) = make(BucketStatsMap, zb0003)
	} else if len((*z)) > 0 {
		for key := range *z {
			delete((*z), key)
		}
	}
	for zb0003 > 0 {
		var zb0001 string
		var zb0002 BucketStats
		zb0003--
		zb0001, bts, err = msgp.ReadStringBytes(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		var field []byte
		_ = field
		var zb0004 uint32
		zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
		for zb0004 > 0 {
			zb0004--
			field, bts, err = msgp.ReadMapKeyZC(bts)
			if err != nil {
				err = msgp.WrapError(err, zb0001)
				return
			}
			switch msgp.UnsafeString(field) {
			case "ReplicationStats":
				bts, err = zb0002.ReplicationStats.UnmarshalMsg(bts)
				if err != nil {
					err = msgp.WrapError(err, zb0001, "ReplicationStats")
					return
				}
			default:
				bts, err = msgp.Skip(bts)
				if err != nil {
					err = msgp.WrapError(err, zb0001)
					return
				}
			}
		}
		(*z)[zb0001] = zb0002
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BucketStatsMap) Msgsize() (s int) {
	s = msgp.MapHeaderSize
	if z != nil {
		for zb0005, zb0006 := range z {
			_ = zb0006
			s += msgp.StringPrefixSize + len(zb0005) + 1 + 17 + zb0006.ReplicationStats.Msgsize()
		}
	}
	return
}

// DecodeMsg implements msgp.Decodable
func (z *ReplicationLatency) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte

@@ -348,6 +348,119 @@ func BenchmarkDecodeBucketStats(b *testing.B) {
	}
}

func TestMarshalUnmarshalBucketStatsMap(t *testing.T) {
	v := BucketStatsMap{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBucketStatsMap(b *testing.B) {
	v := BucketStatsMap{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBucketStatsMap(b *testing.B) {
	v := BucketStatsMap{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBucketStatsMap(b *testing.B) {
	v := BucketStatsMap{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBucketStatsMap(t *testing.T) {
	v := BucketStatsMap{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBucketStatsMap Msgsize() is inaccurate")
	}

	vn := BucketStatsMap{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBucketStatsMap(b *testing.B) {
	v := BucketStatsMap{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBucketStatsMap(b *testing.B) {
	v := BucketStatsMap{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalReplicationLatency(t *testing.T) {
	v := ReplicationLatency{}
	bts, err := v.MarshalMsg(nil)

@@ -120,9 +120,6 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
	}
	if tgt.Type == madmin.ReplicationService {
		if !globalIsErasure {
			return NotImplemented{Message: "Replication is not implemented in " + getMinioMode()}
		}
		if !globalBucketVersioningSys.Enabled(bucket) {
			return BucketReplicationSourceNotVersioned{Bucket: bucket}
		}
@@ -184,9 +181,6 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
	if globalIsGateway {
		return nil
	}
	if !globalIsErasure {
		return NotImplemented{Message: "Replication is not implemented in " + getMinioMode()}
	}

	if arnStr == "" {
		return BucketRemoteArnInvalid{Bucket: bucket}
@@ -201,7 +195,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
	// reject removal of remote target if replication configuration is present
	rcfg, err := getReplicationConfig(ctx, bucket)
	if err == nil {
		for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{}) {
		for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{OpType: replication.AllReplicationType}) {
			if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) {
				sys.RLock()
				_, ok := sys.arnRemotesMap[arnStr]

@@ -18,12 +18,14 @@
package cmd

import (
	"encoding/base64"
	"encoding/xml"
	"io"
	"net/http"

	humanize "github.com/dustin/go-humanize"
	"github.com/gorilla/mux"
	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/bucket/versioning"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/bucket/policy"
@@ -63,28 +65,28 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
		return
	}

	if globalSiteReplicationSys.isEnabled() {
	if globalSiteReplicationSys.isEnabled() && !v.Enabled() {
		writeErrorResponse(ctx, w, APIError{
			Code:           "InvalidBucketState",
			Description:    "Cluster replication is enabled for this site, so the versioning state cannot be changed.",
			HTTPStatusCode: http.StatusConflict,
			Description:    "Cluster replication is enabled on this site, versioning cannot be suspended on bucket.",
			HTTPStatusCode: http.StatusBadRequest,
		}, r.URL)
		return
	}

	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && (v.Suspended() || v.PrefixesExcluded()) {
		writeErrorResponse(ctx, w, APIError{
			Code:           "InvalidBucketState",
			Description:    "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
			HTTPStatusCode: http.StatusConflict,
			Description:    "An Object Lock configuration is present on this bucket, versioning cannot be suspended.",
			HTTPStatusCode: http.StatusBadRequest,
		}, r.URL)
		return
	}
	if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
		writeErrorResponse(ctx, w, APIError{
			Code:           "InvalidBucketState",
			Description:    "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
			HTTPStatusCode: http.StatusConflict,
			Description:    "A replication configuration is present on this bucket, bucket wide versioning cannot be suspended.",
			HTTPStatusCode: http.StatusBadRequest,
		}, r.URL)
		return
	}
@@ -100,6 +102,20 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
		return
	}

	// Call site replication hook.
	//
	// We encode the xml bytes as base64 to ensure there are no encoding
	// errors.
	cfgStr := base64.StdEncoding.EncodeToString(configData)
	if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
		Type:       madmin.SRBucketMetaTypeVersionConfig,
		Bucket:     bucket,
		Versioning: &cfgStr,
	}); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	writeSuccessResponseHeadersOnly(w)
}

@@ -17,44 +17,69 @@

package cmd

import "github.com/minio/minio/internal/bucket/versioning"
import (
	"strings"

	"github.com/minio/minio/internal/bucket/versioning"
	"github.com/minio/minio/internal/logger"
)

// BucketVersioningSys - versioning subsystem.
type BucketVersioningSys struct{}

// Enabled enabled versioning?
func (sys *BucketVersioningSys) Enabled(bucket string) bool {
	vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
	vc, err := sys.Get(bucket)
	if err != nil {
		return false
		logger.CriticalIf(GlobalContext, err)
	}
	return vc.Enabled()
}

// PrefixEnabled returns true if versioning is enabled at bucket level and if
// the given prefix doesn't match any excluded prefixes pattern. This is
// part of a MinIO versioning configuration extension.
func (sys *BucketVersioningSys) PrefixEnabled(bucket, prefix string) bool {
	vc, err := sys.Get(bucket)
	if err != nil {
		logger.CriticalIf(GlobalContext, err)
	}
	return vc.PrefixEnabled(prefix)
}

// Suspended suspended versioning?
func (sys *BucketVersioningSys) Suspended(bucket string) bool {
	vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
	vc, err := sys.Get(bucket)
	if err != nil {
		return false
		logger.CriticalIf(GlobalContext, err)
	}
	return vc.Suspended()
}

// PrefixSuspended returns true if the given prefix matches an excluded prefix
// pattern. This is part of a MinIO versioning configuration extension.
func (sys *BucketVersioningSys) PrefixSuspended(bucket, prefix string) bool {
	vc, err := sys.Get(bucket)
	if err != nil {
		logger.CriticalIf(GlobalContext, err)
	}

	return vc.PrefixSuspended(prefix)
}

// Get returns stored bucket versioning configuration
func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, error) {
	if globalIsGateway {
		objAPI := newObjectLayerFn()
		if objAPI == nil {
			return nil, errServerNotInitialized
		}
		return nil, NotImplemented{}
		// Gateway does not implement versioning.
		return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, nil
	}
	return globalBucketMetadataSys.GetVersioningConfig(bucket)
}

// Reset BucketVersioningSys to initial state.
func (sys *BucketVersioningSys) Reset() {
	// There is currently no internal state.
	if bucket == minioMetaBucket || strings.HasPrefix(bucket, minioMetaBucket) {
		return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, nil
	}

	vcfg, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
	return vcfg, err
}

// NewBucketVersioningSys - creates new versioning system.

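Note: the PrefixEnabled/PrefixSuspended checks above consult MinIO's extension to the S3 versioning configuration, which can exclude prefixes from versioning. A bucket configuration of roughly this shape (a sketch per the MinIO extension; the prefix value is illustrative) makes matching objects behave as unversioned, which is why the replication paths in this changeset skip them:

    <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Status>Enabled</Status>
        <ExcludedPrefixes>
            <Prefix>app1-jobs/*/_temporary/</Prefix>
        </ExcludedPrefixes>
    </VersioningConfiguration>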
@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -37,4 +37,7 @@ var (

	// ShortCommitID - first 12 characters from CommitID.
	ShortCommitID = "DEVELOPMENT.GOGET"

	// CopyrightYear - dynamic value of the copyright end year
	CopyrightYear = "0000"
)

cmd/callhome.go (new file, 136 lines)
@@ -0,0 +1,136 @@
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/logger"
	uatomic "go.uber.org/atomic"
)

const (
	// callhomeSchemaVersion1 is callhome schema version 1
	callhomeSchemaVersion1 = "1"

	// callhomeSchemaVersion is current callhome schema version.
	callhomeSchemaVersion = callhomeSchemaVersion1

	// callhomeCycleDefault is the default interval between two callhome cycles (24hrs)
	callhomeCycleDefault = 24 * time.Hour
)

// CallhomeInfo - Contains callhome information
type CallhomeInfo struct {
	SchemaVersion string             `json:"schema_version"`
	AdminInfo     madmin.InfoMessage `json:"admin_info"`
}

var (
	enableCallhome            = uatomic.NewBool(false)
	callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
	callhomeFreq              = uatomic.NewDuration(callhomeCycleDefault)
)

func updateCallhomeParams(ctx context.Context, objAPI ObjectLayer) {
	alreadyEnabled := enableCallhome.Load()
	enableCallhome.Store(globalCallhomeConfig.Enable)
	callhomeFreq.Store(globalCallhomeConfig.Frequency)

	// If callhome was disabled earlier and has now been enabled,
	// initialize the callhome process again.
	if !alreadyEnabled && enableCallhome.Load() {
		initCallhome(ctx, objAPI)
	}
}

// initCallhome will start the callhome task in the background.
func initCallhome(ctx context.Context, objAPI ObjectLayer) {
	go func() {
		r := rand.New(rand.NewSource(time.Now().UnixNano()))
		// Leader node (that successfully acquires the lock inside runCallhome)
		// will keep performing the callhome. If the leader goes down for some reason,
		// the lock will be released and another node will acquire it and take over
		// because of this loop.
		for {
			runCallhome(ctx, objAPI)
			if !enableCallhome.Load() {
				return
			}

			// callhome running on a different node.
			// sleep for some time and try again.
			duration := time.Duration(r.Float64() * float64(callhomeFreq.Load()))
			if duration < time.Second {
				// Make sure to sleep at least a second to avoid high CPU ticks.
				duration = time.Second
			}
			time.Sleep(duration)

			if !enableCallhome.Load() {
				return
			}
		}
	}()
}

func runCallhome(ctx context.Context, objAPI ObjectLayer) {
	// Make sure only 1 callhome is running on the cluster.
	locker := objAPI.NewNSLock(minioMetaBucket, "callhome/runCallhome.lock")
	lkctx, err := locker.GetLock(ctx, callhomeLeaderLockTimeout)
	if err != nil {
		return
	}

	ctx = lkctx.Context()
	defer locker.Unlock(lkctx.Cancel)

	callhomeTimer := time.NewTimer(callhomeFreq.Load())
	defer callhomeTimer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-callhomeTimer.C:
			if !enableCallhome.Load() {
				// Stop the processing as callhome got disabled
				return
			}
			performCallhome(ctx)

			// Reset the timer for next cycle.
			callhomeTimer.Reset(callhomeFreq.Load())
		}
	}
}

func performCallhome(ctx context.Context) {
	err := sendCallhomeInfo(
		CallhomeInfo{
			SchemaVersion: callhomeSchemaVersion,
			AdminInfo:     getServerInfo(ctx, nil),
		})
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
	}
}
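Note on the design: the namespace lock on "callhome/runCallhome.lock" doubles as a cheap cluster-wide leader election, so exactly one node reports per cycle; if the leader dies, its lock lapses and some other node's initCallhome loop wins the next GetLock attempt, with no extra coordination. The randomized sleep (uniform in [1s, callhome frequency)) keeps the non-leaders from retrying the lock in lockstep.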
@@ -24,6 +24,7 @@ import (
	"crypto/tls"
	"crypto/x509"
	"encoding/gob"
	"encoding/pem"
	"errors"
	"fmt"
	"io/ioutil"
@@ -32,6 +33,7 @@ import (
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
@@ -100,12 +102,16 @@ func init() {
		PersistOnFailure: false,
	}

	globalIsCICD = env.Get("MINIO_CI_CD", "") != "" || env.Get("CI", "") != ""

	containers := IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsPCFTile()

	// Call to refresh will refresh names in cache. If you pass true, it will also
	// remove cached names not looked up since the last call to Refresh. It is a good idea
	// to call this method on a regular interval.
	go func() {
		var t *time.Ticker
		if IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsPCFTile() {
		if containers {
			t = time.NewTicker(1 * time.Minute)
		} else {
			t = time.NewTicker(10 * time.Minute)
@@ -172,11 +178,22 @@ func minioConfigToConsoleFeatures() {
			os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
		}
	}
	// pass the console subpath configuration
	if value := env.Get(config.EnvMinIOBrowserRedirectURL, ""); value != "" {
		subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
		if subPath != SlashSeparator {
			os.Setenv("CONSOLE_SUBPATH", subPath)
		}
	}
	// Enable if prometheus URL is set.
	if value := env.Get("MINIO_PROMETHEUS_URL", ""); value != "" {
		os.Setenv("CONSOLE_PROMETHEUS_URL", value)
		if value := env.Get("MINIO_PROMETHEUS_JOB_ID", "minio-job"); value != "" {
			os.Setenv("CONSOLE_PROMETHEUS_JOB_ID", value)
			// Support additional labels for more granular filtering.
			if value := env.Get("MINIO_PROMETHEUS_EXTRA_LABELS", ""); value != "" {
				os.Setenv("CONSOLE_PROMETHEUS_EXTRA_LABELS", value)
			}
		}
	}
	// Enable if LDAP is enabled.
@@ -184,23 +201,23 @@ func minioConfigToConsoleFeatures() {
		os.Setenv("CONSOLE_LDAP_ENABLED", config.EnableOn)
	}
	// if IDP is enabled, set IDP environment variables
	if globalOpenIDConfig.URL != nil {
		os.Setenv("CONSOLE_IDP_URL", globalOpenIDConfig.URL.String())
		os.Setenv("CONSOLE_IDP_CLIENT_ID", globalOpenIDConfig.ClientID)
		os.Setenv("CONSOLE_IDP_SECRET", globalOpenIDConfig.ClientSecret)
	if globalOpenIDConfig.ProviderCfgs[config.Default] != nil {
		os.Setenv("CONSOLE_IDP_URL", globalOpenIDConfig.ProviderCfgs[config.Default].URL.String())
		os.Setenv("CONSOLE_IDP_CLIENT_ID", globalOpenIDConfig.ProviderCfgs[config.Default].ClientID)
		os.Setenv("CONSOLE_IDP_SECRET", globalOpenIDConfig.ProviderCfgs[config.Default].ClientSecret)
		os.Setenv("CONSOLE_IDP_HMAC_SALT", globalDeploymentID)
		os.Setenv("CONSOLE_IDP_HMAC_PASSPHRASE", globalOpenIDConfig.ClientID)
		os.Setenv("CONSOLE_IDP_SCOPES", strings.Join(globalOpenIDConfig.DiscoveryDoc.ScopesSupported, ","))
		if globalOpenIDConfig.ClaimUserinfo {
		os.Setenv("CONSOLE_IDP_HMAC_PASSPHRASE", globalOpenIDConfig.ProviderCfgs[config.Default].ClientID)
		os.Setenv("CONSOLE_IDP_SCOPES", strings.Join(globalOpenIDConfig.ProviderCfgs[config.Default].DiscoveryDoc.ScopesSupported, ","))
		if globalOpenIDConfig.ProviderCfgs[config.Default].ClaimUserinfo {
			os.Setenv("CONSOLE_IDP_USERINFO", config.EnableOn)
		}
		if globalOpenIDConfig.RedirectURIDynamic {
		if globalOpenIDConfig.ProviderCfgs[config.Default].RedirectURIDynamic {
			// Enable dynamic redirect-uri's based on incoming 'host' header,
			// Overrides any other callback URL.
			os.Setenv("CONSOLE_IDP_CALLBACK_DYNAMIC", config.EnableOn)
		}
		if globalOpenIDConfig.RedirectURI != "" {
			os.Setenv("CONSOLE_IDP_CALLBACK", globalOpenIDConfig.RedirectURI)
		if globalOpenIDConfig.ProviderCfgs[config.Default].RedirectURI != "" {
			os.Setenv("CONSOLE_IDP_CALLBACK", globalOpenIDConfig.ProviderCfgs[config.Default].RedirectURI)
		} else {
			os.Setenv("CONSOLE_IDP_CALLBACK", getConsoleEndpoints()[0]+"/oauth_callback")
		}
@@ -213,8 +230,8 @@ func minioConfigToConsoleFeatures() {
	if globalSubnetConfig.APIKey != "" {
		os.Setenv("CONSOLE_SUBNET_API_KEY", globalSubnetConfig.APIKey)
	}
	if globalSubnetConfig.Proxy != "" {
		os.Setenv("CONSOLE_SUBNET_PROXY", globalSubnetConfig.Proxy)
	if globalSubnetConfig.ProxyURL != nil {
		os.Setenv("CONSOLE_SUBNET_PROXY", globalSubnetConfig.ProxyURL.String())
	}
}

@@ -482,6 +499,13 @@ func parsEnvEntry(envEntry string) (envKV, error) {
	key := envTokens[0]
	val := envTokens[1]

	if strings.HasPrefix(key, "#") {
		// Skip commented lines
		return envKV{
			Skip: true,
		}, nil
	}

	// Remove quotes from the value if found
	if len(val) >= 2 {
		quote := val[0]
@@ -502,9 +526,6 @@ func parsEnvEntry(envEntry string) (envKV, error) {
func minioEnvironFromFile(envConfigFile string) ([]envKV, error) {
	f, err := os.Open(envConfigFile)
	if err != nil {
		if os.IsNotExist(err) { // ignore if file doesn't exist.
			return nil, nil
		}
		return nil, err
	}
	defer f.Close()
@@ -600,7 +621,7 @@ func loadEnvVarsFromFiles() {

	if env.IsSet(config.EnvConfigEnvFile) {
		ekvs, err := minioEnvironFromFile(env.Get(config.EnvConfigEnvFile, ""))
		if err != nil {
		if err != nil && !os.IsNotExist(err) {
			logger.Fatal(err, "Unable to read the config environment file")
		}
		for _, ekv := range ekvs {
@@ -625,12 +646,11 @@ func handleCommonEnvVars() {
	}
	// Look for if URL has invalid values and return error.
	if !((u.Scheme == "http" || u.Scheme == "https") &&
		(u.Path == "/" || u.Path == "") && u.Opaque == "" &&
		u.Opaque == "" &&
		!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
		err := fmt.Errorf("URL contains unexpected resources, expected URL to be of http(s)://minio.example.com format: %v", u)
		logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value in environment variable")
	}
	u.Path = "" // remove any path component such as `/`
	globalBrowserRedirectURL = u
}
}
@@ -664,8 +684,6 @@ func handleCommonEnvVars() {
		globalRootDiskThreshold = size
	}

	globalIsCICD = env.Get("MINIO_CI_CD", "") != "" || env.Get("CI", "") != ""

	domains := env.Get(config.EnvDomain, "")
	if len(domains) != 0 {
		for _, domainName := range strings.Split(domains, config.ValueSeparator) {
@@ -766,17 +784,22 @@ func handleCommonEnvVars() {
		}
		globalActiveCred = cred
	}
}

// Initialize KMS global variable after validating and loading the configuration.
// It depends on KMS env variables and global cli flags.
func handleKMSConfig() {
	switch {
	case env.IsSet(config.EnvKMSSecretKey) && env.IsSet(config.EnvKESEndpoint):
		logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", config.EnvKMSSecretKey, config.EnvKESEndpoint))
	}

	if env.IsSet(config.EnvKMSSecretKey) {
		GlobalKMS, err = kms.Parse(env.Get(config.EnvKMSSecretKey, ""))
		KMS, err := kms.Parse(env.Get(config.EnvKMSSecretKey, ""))
		if err != nil {
			logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment")
		}
		GlobalKMS = KMS
	}
	if env.IsSet(config.EnvKESEndpoint) {
		var endpoints []string
@@ -796,7 +819,29 @@ func handleCommonEnvVars() {
			endpoints = append(endpoints, strings.Join(lbls, ""))
		}
	}
	certificate, err := tls.LoadX509KeyPair(env.Get(config.EnvKESClientCert, ""), env.Get(config.EnvKESClientKey, ""))
	// Manually load the certificate and private key into memory.
	// We need to check whether the private key is encrypted, and
	// if so, decrypt it using the user-provided password.
	certBytes, err := os.ReadFile(env.Get(config.EnvKESClientCert, ""))
	if err != nil {
		logger.Fatal(err, "Unable to load KES client certificate as specified by the shell environment")
	}
	keyBytes, err := os.ReadFile(env.Get(config.EnvKESClientKey, ""))
	if err != nil {
		logger.Fatal(err, "Unable to load KES client private key as specified by the shell environment")
	}
	privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes))
	if len(rest) != 0 {
		logger.Fatal(errors.New("private key contains additional data"), "Unable to load KES client private key as specified by the shell environment")
	}
	if x509.IsEncryptedPEMBlock(privateKeyPEM) {
		keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(config.EnvKESClientPassword, "")))
		if err != nil {
			logger.Fatal(err, "Unable to decrypt KES client private key as specified by the shell environment")
		}
		keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes})
	}
	certificate, err := tls.X509KeyPair(certBytes, keyBytes)
	if err != nil {
		logger.Fatal(err, "Unable to load KES client certificate as specified by the shell environment")
	}

@@ -134,6 +134,24 @@ export MINIO_ROOT_PASSWORD=minio123`,
			true,
			nil,
		},
		{
			`
# MINIO_ROOT_USER=minioadmin
# MINIO_ROOT_PASSWORD=minioadmin
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=minio123`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
				{
					Key:   "MINIO_ROOT_PASSWORD",
					Value: "minio123",
				},
			},
		},
	}
	for _, testCase := range testCases {
		testCase := testCase
@@ -153,6 +171,11 @@ export MINIO_ROOT_PASSWORD=minio123`,
		if err == nil && testCase.expectedErr {
			t.Error(errors.New("expected error, found success"))
		}

		if len(ekvs) != len(testCase.expectedEkvs) {
			t.Errorf("expected %v keys, got %v keys", len(testCase.expectedEkvs), len(ekvs))
		}

		if !reflect.DeepEqual(ekvs, testCase.expectedEkvs) {
			t.Errorf("expected %v, got %v", testCase.expectedEkvs, ekvs)
		}

@@ -28,15 +28,18 @@ import (
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/config/api"
|
||||
"github.com/minio/minio/internal/config/cache"
|
||||
"github.com/minio/minio/internal/config/callhome"
|
||||
"github.com/minio/minio/internal/config/compress"
|
||||
"github.com/minio/minio/internal/config/dns"
|
||||
"github.com/minio/minio/internal/config/etcd"
|
||||
"github.com/minio/minio/internal/config/heal"
|
||||
xldap "github.com/minio/minio/internal/config/identity/ldap"
|
||||
"github.com/minio/minio/internal/config/identity/openid"
|
||||
idplugin "github.com/minio/minio/internal/config/identity/plugin"
|
||||
xtls "github.com/minio/minio/internal/config/identity/tls"
|
||||
"github.com/minio/minio/internal/config/notify"
|
||||
"github.com/minio/minio/internal/config/policy/opa"
|
||||
polplugin "github.com/minio/minio/internal/config/policy/plugin"
|
||||
"github.com/minio/minio/internal/config/scanner"
|
||||
"github.com/minio/minio/internal/config/storageclass"
|
||||
"github.com/minio/minio/internal/config/subnet"
|
||||
@@ -55,7 +58,9 @@ func initHelp() {
|
||||
config.IdentityLDAPSubSys: xldap.DefaultKVS,
|
||||
config.IdentityOpenIDSubSys: openid.DefaultKVS,
|
||||
config.IdentityTLSSubSys: xtls.DefaultKVS,
|
||||
config.IdentityPluginSubSys: idplugin.DefaultKVS,
|
||||
config.PolicyOPASubSys: opa.DefaultKVS,
|
||||
config.PolicyPluginSubSys: polplugin.DefaultKVS,
|
||||
config.SiteSubSys: config.DefaultSiteKVS,
|
||||
config.RegionSubSys: config.DefaultRegionKVS,
|
||||
config.APISubSys: api.DefaultKVS,
|
||||
@@ -66,6 +71,7 @@ func initHelp() {
|
||||
config.HealSubSys: heal.DefaultKVS,
|
||||
config.ScannerSubSys: scanner.DefaultKVS,
|
||||
config.SubnetSubSys: subnet.DefaultKVS,
|
||||
config.CallhomeSubSys: callhome.DefaultKVS,
|
||||
}
|
||||
for k, v := range notify.DefaultNotificationKVS {
|
||||
kvs[k] = v
|
||||
@@ -94,8 +100,9 @@ func initHelp() {
|
||||
Description: "federate multiple clusters for IAM and Bucket DNS",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.IdentityOpenIDSubSys,
|
||||
Description: "enable OpenID SSO support",
|
||||
Key: config.IdentityOpenIDSubSys,
|
||||
Description: "enable OpenID SSO support",
|
||||
MultipleTargets: true,
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.IdentityLDAPSubSys,
|
||||
@@ -106,8 +113,12 @@ func initHelp() {
|
||||
Description: "enable X.509 TLS certificate SSO support",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.PolicyOPASubSys,
|
||||
Description: "[DEPRECATED] enable external OPA for policy enforcement",
|
||||
Key: config.IdentityPluginSubSys,
|
||||
Description: "enable Identity Plugin via external hook",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.PolicyPluginSubSys,
|
||||
Description: "enable Access Management Plugin for policy enforcement",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.APISubSys,
|
||||
@@ -192,6 +203,12 @@ func initHelp() {
|
||||
Description: "set subnet config for the cluster e.g. api key",
|
||||
Optional: true,
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.CallhomeSubSys,
|
||||
Type: "string",
|
||||
Description: "enable callhome for the cluster",
|
||||
Optional: true,
|
||||
},
|
||||
}
|
||||
|
||||
if globalIsErasure {
|
||||
@@ -217,7 +234,9 @@ func initHelp() {
|
||||
config.IdentityOpenIDSubSys: openid.Help,
|
||||
config.IdentityLDAPSubSys: xldap.Help,
|
||||
config.IdentityTLSSubSys: xtls.Help,
|
||||
config.IdentityPluginSubSys: idplugin.Help,
|
||||
config.PolicyOPASubSys: opa.Help,
|
||||
config.PolicyPluginSubSys: polplugin.Help,
|
||||
config.LoggerWebhookSubSys: logger.Help,
|
||||
config.AuditWebhookSubSys: logger.HelpWebhook,
|
||||
config.AuditKafkaSubSys: logger.HelpKafka,
|
||||
@@ -232,6 +251,7 @@ func initHelp() {
|
||||
config.NotifyWebhookSubSys: notify.HelpWebhook,
|
||||
config.NotifyESSubSys: notify.HelpES,
|
||||
config.SubnetSubSys: subnet.HelpSubnet,
|
||||
config.CallhomeSubSys: callhome.HelpCallhome,
|
||||
}
|
||||
|
||||
config.RegisterHelpSubSys(helpMap)
|
||||
@@ -242,6 +262,10 @@ func initHelp() {
|
||||
Key: config.RegionSubSys,
|
||||
Description: "[DEPRECATED - use `site` instead] label the location of the server",
|
||||
},
|
||||
config.PolicyOPASubSys: {
|
||||
Key: config.PolicyOPASubSys,
|
||||
Description: "[DEPRECATED - use `policy_plugin` instead] enable external OPA for policy enforcement",
|
||||
},
|
||||
}
|
||||
|
||||
config.RegisterHelpDeprecatedSubSys(deprecatedHelpKVMap)
|
||||
@@ -314,7 +338,7 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er
|
||||
etcdClnt.Close()
|
||||
}
|
||||
case config.IdentityOpenIDSubSys:
|
||||
if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
|
||||
if _, err := openid.LookupConfig(s,
|
||||
NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -334,14 +358,34 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er
|
||||
if _, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default]); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.IdentityPluginSubSys:
|
||||
if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
|
||||
NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.SubnetSubSys:
|
||||
if _, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default]); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.CallhomeSubSys:
|
||||
if _, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default]); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.PolicyOPASubSys:
|
||||
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
|
||||
// In case legacy OPA config is being set, we treat it as if the
|
||||
// AuthZPlugin is being set.
|
||||
subSys = config.PolicyPluginSubSys
|
||||
fallthrough
|
||||
case config.PolicyPluginSubSys:
|
||||
if ppargs, err := polplugin.LookupConfig(s[config.PolicyPluginSubSys][config.Default],
|
||||
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
|
||||
return err
|
||||
} else if ppargs.URL == nil {
|
||||
// Check if legacy opa is configured.
|
||||
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
|
||||
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
if config.LoggerSubSystems.Contains(subSys) {
|
||||
@@ -484,22 +528,6 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
||||
getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
|
||||
})
|
||||
|
||||
if globalIsErasure && objAPI != nil {
|
||||
setDriveCounts := objAPI.SetDriveCounts()
|
||||
for i, setDriveCount := range setDriveCounts {
|
||||
sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
|
||||
break
|
||||
}
|
||||
// if we validated all setDriveCounts and it was successful
|
||||
// proceed to store the correct storage class globally.
|
||||
if i == len(setDriveCounts)-1 {
|
||||
globalStorageClass.Update(sc)
|
||||
}
|
||||
}
|
||||
}

globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default])
if err != nil {
if globalIsGateway {
@@ -532,27 +560,45 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify))
}

globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
globalOpenIDConfig, err = openid.LookupConfig(s,
NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}

opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OPA: %w", err))
}

globalOpenIDValidators = getOpenIDValidators(globalOpenIDConfig)
globalPolicyOPA = opa.New(opaCfg)

globalLDAPConfig, err = xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err))
}

authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err))
}
globalAuthNPlugin = idplugin.New(authNPluginCfg)

authZPluginCfg, err := polplugin.LookupConfig(s[config.PolicyPluginSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err))
}
if authZPluginCfg.URL == nil {
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err))
} else {
authZPluginCfg.URL = opaCfg.URL
authZPluginCfg.AuthToken = opaCfg.AuthToken
authZPluginCfg.Transport = opaCfg.Transport
authZPluginCfg.CloseRespFn = opaCfg.CloseRespFn
}
}

setGlobalAuthZPlugin(polplugin.New(authZPluginCfg))

globalSubnetConfig, err = subnet.LookupConfig(s[config.SubnetSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
@@ -616,7 +662,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
return fmt.Errorf("Unable to apply scanner config: %w", err)
}
// update dynamic scanner values.
scannerCycle.Update(scannerCfg.Cycle)
scannerCycle.Store(scannerCfg.Cycle)
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
case config.LoggerWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(s, config.LoggerWebhookSubSys)
@@ -626,7 +672,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.HTTP {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
l.LogOnce = logger.LogOnceConsoleIf
l.UserAgent = userAgent
l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
loggerCfg.HTTP[n] = l
@@ -644,7 +690,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.AuditWebhook {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
l.LogOnce = logger.LogOnceConsoleIf
l.UserAgent = userAgent
l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
loggerCfg.AuditWebhook[n] = l
@@ -670,6 +716,30 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %w", err))
}
case config.StorageClassSubSys:
if globalIsErasure && objAPI != nil {
setDriveCounts := objAPI.SetDriveCounts()
for i, setDriveCount := range setDriveCounts {
sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
break
}
// if we validated all setDriveCounts and it was successful
// proceed to store the correct storage class globally.
if i == len(setDriveCounts)-1 {
globalStorageClass.Update(sc)
}
}
}
case config.CallhomeSubSys:
callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err))
} else {
globalCallhomeConfig = callhomeCfg
updateCallhomeParams(ctx, objAPI)
}
}
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
@@ -807,17 +877,3 @@ func loadConfig(objAPI ObjectLayer) error {

return nil
}

// getOpenIDValidators - returns ValidatorList which contains
// enabled providers in server config.
// A new authentication provider is added like below
// * Add a new provider in pkg/iam/openid package.
func getOpenIDValidators(cfg openid.Config) *openid.Validators {
validators := openid.NewValidators()

if cfg.Enabled {
validators.Add(&cfg)
}

return validators
}
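Editor's note: `getOpenIDValidators` registers the provider only when it is enabled. The enable-gated registry pattern it follows, shown as an isolated runnable sketch — `Validator` and `Registry` here are illustrative stand-ins, not the `openid` package's real types:

```go
package main

import "fmt"

// Validator and Registry stand in for openid.Config and openid.Validators.
type Validator interface{ ID() string }

type Registry struct{ byID map[string]Validator }

func NewRegistry() *Registry { return &Registry{byID: map[string]Validator{}} }

func (r *Registry) Add(v Validator) { r.byID[v.ID()] = v }

type provider struct {
    name    string
    enabled bool
}

func (p *provider) ID() string { return p.name }

func main() {
    r := NewRegistry()
    p := &provider{name: "openid", enabled: true}
    if p.enabled { // only enabled providers are registered
        r.Add(p)
    }
    fmt.Println(len(r.byID)) // 1
}
```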

@@ -2649,7 +2649,6 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {

cfg.Version = "31"
cfg.OpenID = openid.Config{}
cfg.OpenID.JWKS.URL = &xnet.URL{}

cfg.Policy.OPA = opa.Args{
URL: &xnet.URL{},

@@ -45,7 +45,7 @@ type HTTPConsoleLoggerSys struct {
// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
ps := pubsub.New()
ps := pubsub.New(8)
return &HTTPConsoleLoggerSys{
pubsub: ps,
console: console.New(),
@@ -75,7 +75,7 @@ func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {
}

// Subscribe starts console logging for this node.
func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh <-chan struct{}, node string, last int, logKind string, filter func(entry interface{}) bool) {
func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh <-chan struct{}, node string, last int, logKind string, filter func(entry interface{}) bool) error {
// Enable console logging for remote client.
if !sys.HasLogListeners() {
logger.AddSystemTarget(sys)
@@ -111,11 +111,12 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh <-chan
select {
case subCh <- entry:
case <-doneCh:
return
return nil
}
}
}
sys.pubsub.Subscribe(subCh, doneCh, filter)

return sys.pubsub.Subscribe(subCh, doneCh, filter)
}
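Editor's note: this hunk changes `Subscribe` to return an error, propagating `sys.pubsub.Subscribe`'s result now that the pub/sub system is built with a bound (`pubsub.New(8)` above). A self-contained sketch of why a bounded pub/sub makes subscription fallible — the types here are illustrative, not the `internal/pubsub` package:

```go
package main

import (
    "errors"
    "fmt"
)

// PubSub is an illustrative bounded pub/sub: Subscribe fails once the
// subscriber limit is reached instead of growing without limit.
type PubSub struct {
    max  int
    subs []chan interface{}
}

func New(max int) *PubSub { return &PubSub{max: max} }

func (p *PubSub) Subscribe(ch chan interface{}) error {
    if len(p.subs) >= p.max {
        return errors.New("too many subscribers")
    }
    p.subs = append(p.subs, ch)
    return nil
}

func main() {
    ps := New(1)
    fmt.Println(ps.Subscribe(make(chan interface{}))) // <nil>
    fmt.Println(ps.Subscribe(make(chan interface{}))) // too many subscribers
}
```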

// Init if HTTPConsoleLoggerSys is valid, always returns nil right now

@@ -21,6 +21,7 @@ import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io/fs"
@@ -37,12 +38,14 @@ import (
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/heal"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/console"
uatomic "go.uber.org/atomic"
)

const (
@@ -64,9 +67,7 @@ var (
dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
// Sleeper values are updated when config is loaded.
scannerSleeper = newDynamicSleeper(10, 10*time.Second)
scannerCycle = &safeDuration{
t: dataScannerStartDelay,
}
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
)
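Editor's note: the diff retires the mutex-guarded `safeDuration` in favor of `go.uber.org/atomic`'s `Duration`, which offers the same store/load semantics without a lock. A minimal runnable sketch of the equivalent calls (the interval values are illustrative):

```go
package main

import (
    "fmt"
    "time"

    uatomic "go.uber.org/atomic"
)

func main() {
    cycle := uatomic.NewDuration(10 * time.Second) // was: &safeDuration{t: ...}
    cycle.Store(30 * time.Second)                  // was: cycle.Update(...)
    fmt.Println(cycle.Load())                      // was: cycle.Get(); prints 30s
}
```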

// initDataScanner will start the scanner in the background.
@@ -76,7 +77,7 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
// Run the data scanner in a loop
for {
runDataScanner(ctx, objAPI)
duration := time.Duration(r.Float64() * float64(scannerCycle.Get()))
duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
@@ -86,21 +87,61 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
}()
}

type safeDuration struct {
sync.Mutex
t time.Duration
func getCycleScanMode(currentCycle, bitrotStartCycle uint64, bitrotStartTime time.Time) madmin.HealScanMode {
bitrotCycle := globalHealConfig.BitrotScanCycle()
switch bitrotCycle {
case -1:
return madmin.HealNormalScan
case 0:
return madmin.HealDeepScan
}

if currentCycle-bitrotStartCycle < healObjectSelectProb {
return madmin.HealDeepScan
}

if time.Since(bitrotStartTime) > bitrotCycle {
return madmin.HealDeepScan
}

return madmin.HealNormalScan
}
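Editor's note on reading the new `getCycleScanMode`: a configured bitrot cycle of -1 disables deep scanning and 0 forces it on every cycle; otherwise a deep scan that has started keeps running for `healObjectSelectProb` cycles, and a fresh one begins once the configured interval elapses. The same decision restated as a runnable sketch, with the global heal config stubbed out (names and thresholds here are illustrative):

```go
package main

import (
    "fmt"
    "time"
)

// pickMode mirrors getCycleScanMode; bitrotCycle stands in for
// globalHealConfig.BitrotScanCycle() and selectProb for healObjectSelectProb.
func pickMode(bitrotCycle time.Duration, cur, start uint64, startTime time.Time, selectProb uint64) string {
    switch bitrotCycle {
    case -1:
        return "normal" // deep scanning disabled
    case 0:
        return "deep" // deep scanning forced on every cycle
    }
    if cur-start < selectProb {
        return "deep" // a started deep scan runs for selectProb cycles
    }
    if time.Since(startTime) > bitrotCycle {
        return "deep" // configured interval elapsed, start a new deep scan
    }
    return "normal"
}

func main() {
    start := time.Now().Add(-48 * time.Hour)
    fmt.Println(pickMode(24*time.Hour, 100, 90, start, 512)) // deep: within the selectProb window
    fmt.Println(pickMode(-1, 100, 90, start, 512))           // normal: deep scans disabled
}
```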

func (s *safeDuration) Update(t time.Duration) {
s.Lock()
defer s.Unlock()
s.t = t
type backgroundHealInfo struct {
BitrotStartTime time.Time `json:"bitrotStartTime"`
BitrotStartCycle uint64 `json:"bitrotStartCycle"`
CurrentScanMode madmin.HealScanMode `json:"currentScanMode"`
}

func (s *safeDuration) Get() time.Duration {
s.Lock()
defer s.Unlock()
return s.t
func readBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer) backgroundHealInfo {
// Get last healing information
buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath)
if err != nil {
if !errors.Is(err, errConfigNotFound) {
logger.LogIf(ctx, err)
}
return backgroundHealInfo{}
}
var info backgroundHealInfo
err = json.Unmarshal(buf, &info)
if err != nil {
logger.LogIf(ctx, err)
return backgroundHealInfo{}
}
return info
}

func saveBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer, info backgroundHealInfo) {
b, err := json.Marshal(info)
if err != nil {
logger.LogIf(ctx, err)
return
}
// Save last healing information
err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b)
if err != nil {
logger.LogIf(ctx, err)
}
}
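Editor's note: the two helpers above persist the heal state as JSON under `backgroundHealInfoPath`. A runnable sketch of the round trip they implement — the struct matches `backgroundHealInfo`'s JSON shape, with the `madmin` scan mode reduced to an `int` for this illustration:

```go
package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// healInfo mirrors backgroundHealInfo's JSON layout.
type healInfo struct {
    BitrotStartTime  time.Time `json:"bitrotStartTime"`
    BitrotStartCycle uint64    `json:"bitrotStartCycle"`
    CurrentScanMode  int       `json:"currentScanMode"`
}

func main() {
    in := healInfo{BitrotStartTime: time.Now().UTC(), BitrotStartCycle: 42, CurrentScanMode: 2}
    b, _ := json.Marshal(in) // what saveBackgroundHealInfo writes
    var out healInfo
    _ = json.Unmarshal(b, &out)       // what readBackgroundHealInfo parses
    fmt.Println(out.BitrotStartCycle) // 42
}
```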

// runDataScanner will start a data scanner.
@@ -130,7 +171,7 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
}
}

scannerTimer := time.NewTimer(scannerCycle.Get())
scannerTimer := time.NewTimer(scannerCycle.Load())
defer scannerTimer.Stop()

for {
@@ -138,19 +179,28 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
case <-ctx.Done():
return
case <-scannerTimer.C:
// Reset the timer for next cycle.
scannerTimer.Reset(scannerCycle.Get())

if intDataUpdateTracker.debug {
console.Debugln("starting scanner cycle")
}

bgHealInfo := readBackgroundHealInfo(ctx, objAPI)
scanMode := getCycleScanMode(nextBloomCycle, bgHealInfo.BitrotStartCycle, bgHealInfo.BitrotStartTime)
if bgHealInfo.CurrentScanMode != scanMode {
newHealInfo := bgHealInfo
newHealInfo.CurrentScanMode = scanMode
if scanMode == madmin.HealDeepScan {
newHealInfo.BitrotStartTime = time.Now().UTC()
newHealInfo.BitrotStartCycle = nextBloomCycle
}
saveBackgroundHealInfo(ctx, objAPI, newHealInfo)
}

// Wait before starting next cycle and wait on startup.
results := make(chan DataUsageInfo, 1)
go storeDataUsageInBackend(ctx, objAPI, results)
bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
logger.LogIf(ctx, err)
err = objAPI.NSScanner(ctx, bf, results, uint32(nextBloomCycle))
err = objAPI.NSScanner(ctx, bf, results, uint32(nextBloomCycle), scanMode)
logger.LogIf(ctx, err)
if err == nil {
// Store new cycle...
@@ -161,6 +211,9 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
logger.LogIf(ctx, err)
}
}

// Reset the timer for next cycle.
scannerTimer.Reset(scannerCycle.Load())
}
}
}
@@ -182,6 +235,7 @@ type folderScanner struct {
dataUsageScannerDebug bool
healFolderInclude uint32 // Include a clean folder one in n cycles.
healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
scanMode madmin.HealScanMode

disks []StorageAPI
disksQuorum int
@@ -250,7 +304,7 @@ var globalScannerStats scannerStats
// The returned cache will always be valid, but may not be updated from the existing.
// Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
// If the supplied context is canceled the function will return at the first chance.
func scanDataFolder(ctx context.Context, poolIdx, setIdx int, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
func scanDataFolder(ctx context.Context, poolIdx, setIdx int, basePath string, cache dataUsageCache, getSize getSizeFn, scanMode madmin.HealScanMode) (dataUsageCache, error) {
t := UTCNow()

logPrefix := color.Green("data-usage: ")
@@ -279,6 +333,7 @@ func scanDataFolder(ctx context.Context, poolIdx, setIdx int, basePath string, c
dataUsageScannerDebug: intDataUpdateTracker.debug,
healFolderInclude: 0,
healObjectSelect: 0,
scanMode: scanMode,
updates: cache.Info.updates,
}

@@ -482,12 +537,15 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
debug: f.dataUsageScannerDebug,
lifeCycle: activeLifeCycle,
replication: replicationCfg,
heal: thisHash.modAlt(f.oldCache.Info.NextCycle/folder.objectHealProbDiv, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
}

item.heal.enabled = thisHash.modAlt(f.oldCache.Info.NextCycle/folder.objectHealProbDiv, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure
item.heal.bitrot = f.scanMode == madmin.HealDeepScan

// if the drive belongs to an erasure set
// that is already being healed, skip the
// healing attempt on this drive.
item.heal = item.heal && f.healObjectSelect > 0
item.heal.enabled = item.heal.enabled && f.healObjectSelect > 0

sz, err := f.getSize(item)
if err != nil {
@@ -821,8 +879,11 @@ type scannerItem struct {
replication replicationConfig
lifeCycle *lifecycle.Lifecycle
Typ fs.FileMode
heal bool // Has the object been selected for heal check?
debug bool
heal struct {
enabled bool
bitrot bool
} // Has the object been selected for heal check?
debug bool
}

type sizeSummary struct {
@@ -874,9 +935,13 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object
console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
}
}
scanMode := madmin.HealNormalScan
if i.heal.bitrot {
scanMode = madmin.HealDeepScan
}
healOpts := madmin.HealOpts{
Remove: healDeleteDangling,
ScanMode: globalHealConfig.ScanMode(),
ScanMode: scanMode,
}
res, err := o.HealObject(ctx, i.bucket, i.objectPath(), oi.VersionID, healOpts)
if err != nil && !errors.Is(err, NotImplemented{}) {
@@ -901,20 +966,8 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje

atomic.AddUint64(&globalScannerStats.ilmChecks, 1)
versionID := oi.VersionID
action := i.lifeCycle.ComputeAction(
lifecycle.ObjectOpts{
Name: i.objectPath(),
UserTags: oi.UserTags,
ModTime: oi.ModTime,
VersionID: oi.VersionID,
DeleteMarker: oi.DeleteMarker,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionedObject.Status,
})
rCfg, _ := globalBucketObjectLockSys.Get(i.bucket)
action := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, oi, false)
if i.debug {
if versionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, action)
@@ -986,9 +1039,13 @@ func (i *scannerItem) applyNewerNoncurrentVersionLimit(ctx context.Context, _ Ob
fivs = fivs[:lim+1]

rcfg, _ := globalBucketObjectLockSys.Get(i.bucket)
vcfg, _ := globalBucketVersioningSys.Get(i.bucket)

versioned := vcfg != nil && vcfg.Versioned(i.objectPath())

toDel := make([]ObjectToDelete, 0, len(overflowVersions))
for _, fi := range overflowVersions {
obj := fi.ToObjectInfo(i.bucket, i.objectPath())
obj := fi.ToObjectInfo(i.bucket, i.objectPath(), versioned)
// skip versions with object locking enabled
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
if i.debug {
@@ -1040,7 +1097,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
// from the current deployment, which means we don't have to call healing
// routine even if we are asked to do via heal flag.
if !applied {
if i.heal {
if i.heal.enabled {
size = i.applyHealing(ctx, o, oi)
}
// replicate only if lifecycle rules are not applied.
@@ -1049,7 +1106,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
return size
}

func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj ObjectInfo, debug bool) (action lifecycle.Action) {
func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr lock.Retention, obj ObjectInfo, debug bool) (action lifecycle.Action) {
action = lc.ComputeAction(obj.ToLifecycleOpts())
if debug {
console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", action)
@@ -1065,18 +1122,15 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj Ob
if obj.VersionID == "" {
return lifecycle.NoneAction
}
if rcfg, _ := globalBucketObjectLockSys.Get(obj.Bucket); rcfg.LockEnabled {
locked := enforceRetentionForDeletion(ctx, obj)
if locked {
if debug {
if obj.VersionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %s v(%s) is locked, not deleting\n", obj.Name, obj.VersionID)
} else {
console.Debugf(applyActionsLogPrefix+" lifecycle: %s is locked, not deleting\n", obj.Name)
}
if lr.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
if debug {
if obj.VersionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %s v(%s) is locked, not deleting\n", obj.Name, obj.VersionID)
} else {
console.Debugf(applyActionsLogPrefix+" lifecycle: %s is locked, not deleting\n", obj.Name)
}
return lifecycle.NoneAction
}
return lifecycle.NoneAction
}
}

@@ -1116,7 +1170,8 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
opts.VersionID = obj.VersionID
}
if opts.VersionID == "" {
opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
opts.Versioned = globalBucketVersioningSys.PrefixEnabled(obj.Bucket, obj.Name)
opts.VersionSuspended = globalBucketVersioningSys.PrefixSuspended(obj.Bucket, obj.Name)
}

obj, err := objLayer.DeleteObject(ctx, obj.Bucket, obj.Name, opts)
@@ -1175,6 +1230,9 @@ func (i *scannerItem) objectPath() string {
// healReplication will heal a scanned item that has failed replication.
func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
roi := getHealReplicateObjectInfo(oi, i.replication)
if !roi.Dsc.ReplicateAny() {
return
}

if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
// heal delete marker replication failure or versioned delete replication failure
@@ -1195,32 +1253,31 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
roi.OpType = replication.ExistingObjectReplicationType
}

if roi.TargetStatuses != nil {
if sizeS.replTargetStats == nil {
sizeS.replTargetStats = make(map[string]replTargetSizeSummary)
if sizeS.replTargetStats == nil && len(roi.TargetStatuses) > 0 {
sizeS.replTargetStats = make(map[string]replTargetSizeSummary)
}

for arn, tgtStatus := range roi.TargetStatuses {
tgtSizeS, ok := sizeS.replTargetStats[arn]
if !ok {
tgtSizeS = replTargetSizeSummary{}
}
for arn, tgtStatus := range roi.TargetStatuses {
tgtSizeS, ok := sizeS.replTargetStats[arn]
if !ok {
tgtSizeS = replTargetSizeSummary{}
}
switch tgtStatus {
case replication.Pending:
tgtSizeS.pendingCount++
tgtSizeS.pendingSize += oi.Size
sizeS.pendingCount++
sizeS.pendingSize += oi.Size
case replication.Failed:
tgtSizeS.failedSize += oi.Size
tgtSizeS.failedCount++
sizeS.failedSize += oi.Size
sizeS.failedCount++
case replication.Completed, "COMPLETE":
tgtSizeS.replicatedSize += oi.Size
sizeS.replicatedSize += oi.Size
}
sizeS.replTargetStats[arn] = tgtSizeS
switch tgtStatus {
case replication.Pending:
tgtSizeS.pendingCount++
tgtSizeS.pendingSize += oi.Size
sizeS.pendingCount++
sizeS.pendingSize += oi.Size
case replication.Failed:
tgtSizeS.failedSize += oi.Size
tgtSizeS.failedCount++
sizeS.failedSize += oi.Size
sizeS.failedCount++
case replication.Completed, replication.CompletedLegacy:
tgtSizeS.replicatedSize += oi.Size
sizeS.replicatedSize += oi.Size
}
sizeS.replTargetStats[arn] = tgtSizeS
}

switch oi.ReplicationStatus {

@@ -535,12 +535,13 @@ func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
}
flat := d.flatten(*e)
dui := DataUsageInfo{
LastUpdate: d.Info.LastUpdate,
ObjectsTotalCount: flat.Objects,
ObjectsTotalSize: uint64(flat.Size),
BucketsCount: uint64(len(e.Children)),
BucketsUsage: d.bucketsUsageInfo(buckets),
TierStats: d.tiersUsageInfo(buckets),
LastUpdate: d.Info.LastUpdate,
ObjectsTotalCount: flat.Objects,
VersionsTotalCount: flat.Versions,
ObjectsTotalSize: uint64(flat.Size),
BucketsCount: uint64(len(e.Children)),
BucketsUsage: d.bucketsUsageInfo(buckets),
TierStats: d.tiersUsageInfo(buckets),
}
return dui
}
@@ -788,6 +789,7 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
flat := d.flatten(*e)
bui := BucketUsageInfo{
Size: uint64(flat.Size),
VersionsCount: flat.Versions,
ObjectsCount: flat.Objects,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
}

@@ -60,6 +60,7 @@ type BucketUsageInfo struct {

ObjectsCount uint64 `json:"objectsCount"`
ObjectSizesHistogram map[string]uint64 `json:"objectsSizesHistogram"`
VersionsCount uint64 `json:"versionsCount"`
ReplicaSize uint64 `json:"objectReplicaTotalSize"`
ReplicationInfo map[string]BucketTargetUsageInfo `json:"objectsReplicationInfo"`
}
@@ -73,6 +74,9 @@ type DataUsageInfo struct {
// Objects total count across all buckets
ObjectsTotalCount uint64 `json:"objectsCount"`

// Versions total count across all buckets
VersionsTotalCount uint64 `json:"versionsCount"`

// Objects total size across all buckets
ObjectsTotalSize uint64 `json:"objectsTotalSize"`
ReplicationInfo map[string]BucketTargetUsageInfo `json:"objectsReplicationInfo"`

@@ -35,6 +35,8 @@ const (
dataUsageBloomName = ".bloomcycle.bin"
dataUsageBloomNamePath = bucketMetaPrefix + SlashSeparator + dataUsageBloomName

backgroundHealInfoPath = bucketMetaPrefix + SlashSeparator + ".background-heal.json"

dataUsageCacheName = ".usage-cache.bin"
)

@@ -67,7 +67,7 @@ func TestDataUsageUpdate(t *testing.T) {
return
}

got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0)
if err != nil {
t.Fatal(err)
}
@@ -178,7 +178,7 @@ func TestDataUsageUpdate(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize)
got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize, 0)
got.Info.NextCycle++
if err != nil {
t.Fatal(err)
@@ -289,7 +289,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
return
}
got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0)
if err != nil {
t.Fatal(err)
}
@@ -423,7 +423,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize)
got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize, 0)
got.Info.NextCycle++
if err != nil {
t.Fatal(err)
@@ -575,7 +575,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
}
return
}
want, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
want, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0)
if err != nil {
t.Fatal(err)
}

@@ -884,6 +884,7 @@ func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Read
}

if err := os.MkdirAll(cachePath, 0o777); err != nil {
removeAll(cachePath)
return oi, err
}
metadata := cloneMSS(opts.UserDefined)
@@ -892,6 +893,7 @@ func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Read
if globalCacheKMS != nil {
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
if err != nil {
removeAll(cachePath)
return oi, err
}
actualSize, _ = sio.EncryptedSize(uint64(size))
@@ -1628,8 +1630,6 @@ func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
case <-ctx.Done():
return
case <-timer.C:
// Reset for the next interval
timer.Reset(cacheStaleUploadCleanupInterval)
now := time.Now()
readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir), func(shaDir string, typ os.FileMode) error {
return readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir), func(uploadIDDir string, typ os.FileMode) error {
@@ -1660,6 +1660,9 @@ func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
}
return nil
})

// Reset for the next interval
timer.Reset(cacheStaleUploadCleanupInterval)
}
}
}
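Editor's note: the last hunk moves the `timer.Reset` from the top of the tick handler to after the cleanup pass, so a slow pass cannot overlap the next tick. The pattern in isolation as a runnable sketch (interval and work are illustrative):

```go
package main

import (
    "context"
    "fmt"
    "time"
)

// run fires work on each tick and only re-arms the timer once the pass
// finishes, matching the relocated Reset above.
func run(ctx context.Context, interval time.Duration, work func()) {
    timer := time.NewTimer(interval)
    defer timer.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-timer.C:
            work()                // complete the full pass first...
            timer.Reset(interval) // ...then arm the next interval
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Millisecond)
    defer cancel()
    run(ctx, 10*time.Millisecond, func() { fmt.Println("cleanup pass") })
}
```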

@@ -19,19 +19,17 @@ package cmd

import (
"container/list"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"os"
"path"
"strconv"
"strings"
"time"

"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/etag"
)

// CacheStatusType - whether the request was served from cache.
@@ -234,77 +232,85 @@ func isCacheEncrypted(meta map[string]string) bool {

// decryptCacheObjectETag tries to decrypt the ETag saved in encrypted format using the cache KMS
func decryptCacheObjectETag(info *ObjectInfo) error {
// Directories are never encrypted.
if info.IsDir {
return nil
return nil // Directories are never encrypted.
}
encrypted := crypto.S3.IsEncrypted(info.UserDefined) && isCacheEncrypted(info.UserDefined)

switch {
case encrypted:
if globalCacheKMS == nil {
return errKMSNotConfigured
// Depending on the SSE type we handle ETags slightly
// differently. ETags encrypted with SSE-S3 must be
// decrypted first, since the client expects that
// a single-part SSE-S3 ETag is equal to the content MD5.
//
// For all other SSE types, the ETag is not the content MD5.
// Therefore, we don't decrypt but only format it.
switch kind, ok := crypto.IsEncrypted(info.UserDefined); {
case ok && kind == crypto.S3 && isCacheEncrypted(info.UserDefined):
ETag, err := etag.Parse(info.ETag)
if err != nil {
return err
}
if len(info.Parts) > 0 { // multipart ETag is not encrypted since it is not md5sum
if !ETag.IsEncrypted() {
info.ETag = ETag.Format().String()
return nil
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(info.UserDefined)
if err != nil {
return err
}
extKey, err := globalCacheKMS.DecryptKey(keyID, kmsKey, kms.Context{info.Bucket: path.Join(info.Bucket, info.Name)})
if err != nil {
return err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), info.Bucket, info.Name); err != nil {
return err
}
etagStr := tryDecryptETag(objectKey[:], info.ETag, false)
// backend ETag was hex encoded before encrypting, so hex decode to get actual ETag
etag, err := hex.DecodeString(etagStr)
if err != nil {
return err
}
info.ETag = string(etag)
return nil
}

key, err := crypto.S3.UnsealObjectKey(globalCacheKMS, info.UserDefined, info.Bucket, info.Name)
if err != nil {
return err
}
ETag, err = etag.Decrypt(key[:], ETag)
if err != nil {
return err
}
info.ETag = ETag.Format().String()
case ok && (kind == crypto.S3KMS || kind == crypto.SSEC) && isCacheEncrypted(info.UserDefined):
ETag, err := etag.Parse(info.ETag)
if err != nil {
return err
}
info.ETag = ETag.Format().String()
}
return nil
}

// decryptCachePartETags tries to decrypt the part ETags saved in encrypted format using the cache KMS
func decryptCachePartETags(c *cacheMeta) ([]string, error) {
var partETags []string
encrypted := crypto.S3.IsEncrypted(c.Meta) && isCacheEncrypted(c.Meta)

switch {
case encrypted:
if globalCacheKMS == nil {
return partETags, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(c.Meta)
// Depending on the SSE type we handle ETags slightly
// differently. ETags encrypted with SSE-S3 must be
// decrypted first, since the client expects that
// a single-part SSE-S3 ETag is equal to the content MD5.
//
// For all other SSE types, the ETag is not the content MD5.
// Therefore, we don't decrypt but only format it.
switch kind, ok := crypto.IsEncrypted(c.Meta); {
case ok && kind == crypto.S3 && isCacheEncrypted(c.Meta):
key, err := crypto.S3.UnsealObjectKey(globalCacheKMS, c.Meta, c.Bucket, c.Object)
if err != nil {
return partETags, err
}
extKey, err := globalCacheKMS.DecryptKey(keyID, kmsKey, kms.Context{c.Bucket: path.Join(c.Bucket, c.Object)})
if err != nil {
return partETags, err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), c.Bucket, c.Object); err != nil {
return partETags, err
return nil, err
}
etags := make([]string, 0, len(c.PartETags))
for i := range c.PartETags {
etagStr := tryDecryptETag(objectKey[:], c.PartETags[i], false)
// backend ETag was hex encoded before encrypting, so hex decode to get actual ETag
etag, err := hex.DecodeString(etagStr)
ETag, err := etag.Parse(c.PartETags[i])
if err != nil {
return []string{}, err
return nil, err
}
partETags = append(partETags, string(etag))
ETag, err = etag.Decrypt(key[:], ETag)
if err != nil {
return nil, err
}
etags = append(etags, ETag.Format().String())
}
return partETags, nil
return etags, nil
case ok && (kind == crypto.S3KMS || kind == crypto.SSEC) && isCacheEncrypted(c.Meta):
etags := make([]string, 0, len(c.PartETags))
for i := range c.PartETags {
ETag, err := etag.Parse(c.PartETags[i])
if err != nil {
return nil, err
}
etags = append(etags, ETag.Format().String())
}
return etags, nil
default:
return c.PartETags, nil
}
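Editor's note: both cache ETag functions now follow the same parse → decrypt (only for encrypted single-part SSE-S3 ETags) → format flow. A fragment of that flow, assuming the `minio/internal/etag` import used in the diff and a hypothetical 32-byte object key `key` already unsealed via the cache KMS:

```go
// Sketch only: the calls (etag.Parse, IsEncrypted, etag.Decrypt, Format)
// appear in the diff above; the helper itself is illustrative.
func formatCacheETag(raw string, key [32]byte) (string, error) {
    E, err := etag.Parse(raw)
    if err != nil {
        return "", err
    }
    if E.IsEncrypted() {
        // Single-part SSE-S3 ETags are stored encrypted; decrypt so the
        // client sees the content MD5.
        if E, err = etag.Decrypt(key[:], E); err != nil {
            return "", err
        }
    }
    // SSE-C / SSE-KMS ETags are not the content MD5; they are only formatted.
    return E.Format().String(), nil
}
```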

@@ -31,8 +31,8 @@ import (
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/disk"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/wildcard"
@@ -594,12 +594,23 @@ func newCache(config cache.Config) ([]*diskCache, bool, error) {
if err != nil {
return nil, false, err
}
var warningMsg string
for i, dir := range config.Drives {
// skip diskCache creation for cache drives missing a format.json
if formats[i] == nil {
caches = append(caches, nil)
continue
}
if !globalIsCICD && len(warningMsg) == 0 {
rootDsk, err := disk.IsRootDisk(dir, "/")
if err != nil {
warningMsg = fmt.Sprintf("Invalid cache dir %s err : %s", dir, err.Error())
}
if rootDsk {
warningMsg = fmt.Sprintf("cache dir cannot be part of root disk: %s", dir)
}
}

if err := checkAtimeSupport(dir); err != nil {
return nil, false, fmt.Errorf("Atime support required for disk caching, atime check failed with %w", err)
}
@@ -610,6 +621,9 @@ func newCache(config cache.Config) ([]*diskCache, bool, error) {
}
caches = append(caches, cache)
}
if warningMsg != "" {
logger.Info(color.Yellow(fmt.Sprintf("WARNING: Usage of root disk for disk caching is deprecated: %s", warningMsg)))
}
return caches, migrating, nil
}

@@ -724,35 +738,30 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
infoCh := make(chan ObjectInfo)
errorCh := make(chan error)
go func() {
defer close(infoCh)
info, err := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader), opts)
if err != nil {
close(infoCh)
pipeReader.CloseWithError(err)
rPipe.CloseWithError(err)
errorCh <- err
return
pipeReader.CloseWithError(err)
rPipe.CloseWithError(err)
if err == nil {
infoCh <- info
}
close(errorCh)
infoCh <- info
}()

go func() {
_, err := dcache.put(lkctx.Context(), bucket, object, rPipe, r.Size(), nil, opts, false, false)
if err != nil {
rPipe.CloseWithError(err)
return
logger.LogIf(lkctx.Context(), err)
}
// We do not care about errors to cached backend.
rPipe.Close()
}()

mwriter := cacheMultiWriter(pipeWriter, wPipe)
_, err = io.Copy(mwriter, r)
pipeWriter.Close()
wPipe.Close()

if err != nil {
err = <-errorCh
return ObjectInfo{}, err
}
info := <-infoCh
@@ -788,8 +797,7 @@ func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
return
}
var opts ObjectOptions
opts.UserDefined = make(map[string]string)
opts.UserDefined[xhttp.ContentMD5] = oi.UserDefined["content-md5"]
opts.UserDefined = cloneMSS(oi.UserDefined)
objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader), opts)
wbCommitStatus := CommitComplete
size := objInfo.Size

@@ -19,6 +19,7 @@ package cmd

import (
"bufio"
"context"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
@@ -35,6 +36,7 @@ import (

"github.com/minio/kes"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/fips"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
@@ -71,18 +73,132 @@ const (
)

// KMSKeyID returns in AWS compatible KMS KeyID() format.
func (o ObjectInfo) KMSKeyID() string {
if len(o.UserDefined) == 0 {
func (o *ObjectInfo) KMSKeyID() string { return kmsKeyIDFromMetadata(o.UserDefined) }

// KMSKeyID returns in AWS compatible KMS KeyID() format.
func (o *MultipartInfo) KMSKeyID() string { return kmsKeyIDFromMetadata(o.UserDefined) }

// kmsKeyIDFromMetadata returns any AWS S3 KMS key ID in the
// metadata, if any. It returns an empty ID if no key ID is
// present.
func kmsKeyIDFromMetadata(metadata map[string]string) string {
const ARNPrefix = "arn:aws:kms:"
if len(metadata) == 0 {
return ""
}
kmsID, ok := o.UserDefined[crypto.MetaKeyID]
kmsID, ok := metadata[crypto.MetaKeyID]
if !ok {
return ""
}
if strings.HasPrefix(kmsID, "arn:aws:kms:") {
if strings.HasPrefix(kmsID, ARNPrefix) {
return kmsID
}
return "arn:aws:kms:" + kmsID
return ARNPrefix + kmsID
}

// DecryptETags decrypts the ETag of all ObjectInfos using the KMS.
//
// It adjusts the size of all encrypted objects since encrypted
// objects are slightly larger due to encryption overhead.
// Further, it decrypts all single-part SSE-S3 encrypted objects
// and formats ETags of SSE-C / SSE-KMS encrypted objects to
// be AWS S3 compliant.
//
// DecryptETags uses a KMS bulk decryption API, if available, which
// is more efficient than decrypting ETags sequentially.
func DecryptETags(ctx context.Context, KMS kms.KMS, objects []ObjectInfo) error {
const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default.
var (
metadata = make([]map[string]string, 0, BatchSize)
buckets = make([]string, 0, BatchSize)
names = make([]string, 0, BatchSize)
)
for len(objects) > 0 {
N := BatchSize
if len(objects) < BatchSize {
N = len(objects)
}
batch := objects[:N]

// We have to decrypt only ETags of SSE-S3 single-part
// objects.
// Therefore, we remember which objects (their index)
// in the current batch are single-part SSE-S3 objects.
metadata = metadata[:0:N]
buckets = buckets[:0:N]
names = names[:0:N]
SSES3SinglePartObjects := make(map[int]bool)
for i, object := range batch {
if kind, ok := crypto.IsEncrypted(object.UserDefined); ok && kind == crypto.S3 && !crypto.IsMultiPart(object.UserDefined) {
SSES3SinglePartObjects[i] = true

metadata = append(metadata, object.UserDefined)
buckets = append(buckets, object.Bucket)
names = append(names, object.Name)
}
}

// If there are no SSE-S3 single-part objects
// we can skip the decryption process. However,
// we still have to adjust the size and ETag
// of SSE-C and SSE-KMS objects.
if len(SSES3SinglePartObjects) == 0 {
for i := range batch {
size, err := batch[i].GetActualSize()
if err != nil {
return err
}
batch[i].Size = size

if _, ok := crypto.IsEncrypted(batch[i].UserDefined); ok {
ETag, err := etag.Parse(batch[i].ETag)
if err != nil {
return err
}
batch[i].ETag = ETag.Format().String()
}
}
objects = objects[N:]
continue
}

// There is at least one SSE-S3 single-part object.
// For all SSE-S3 single-part objects we have to
// fetch their decryption keys. We do this using
// a Bulk-Decryption API call, if available.
keys, err := crypto.S3.UnsealObjectKeys(ctx, KMS, metadata, buckets, names)
if err != nil {
return err
}

// Now, we have to decrypt the ETags of SSE-S3 single-part
// objects and adjust the size and ETags of all encrypted
// objects.
for i := range batch {
size, err := batch[i].GetActualSize()
if err != nil {
return err
}
batch[i].Size = size

if _, ok := crypto.IsEncrypted(batch[i].UserDefined); ok {
ETag, err := etag.Parse(batch[i].ETag)
if err != nil {
return err
}
if SSES3SinglePartObjects[i] && ETag.IsEncrypted() {
ETag, err = etag.Decrypt(keys[0][:], ETag)
if err != nil {
return err
}
keys = keys[1:]
}
batch[i].ETag = ETag.Format().String()
}
}
objects = objects[N:]
}
return nil
}
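Editor's note: `DecryptETags` walks the object list in fixed-size batches, reusing capacity-capped scratch slices. The batch-walk idiom in isolation as a runnable sketch (data and batch size are illustrative):

```go
package main

import "fmt"

func main() {
    const batchSize = 3
    items := []int{1, 2, 3, 4, 5, 6, 7}
    for len(items) > 0 {
        n := batchSize
        if len(items) < n {
            n = len(items) // final, possibly short batch
        }
        batch := items[:n]
        fmt.Println(batch) // [1 2 3] [4 5 6] [7]
        items = items[n:]  // advance past the processed batch
    }
}
```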

// isMultipart returns true if the current object is

@@ -582,7 +582,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
return endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg("use path style endpoint for FS setup")
}
endpoints = append(endpoints, endpoint)
setupType = FSSetupType
setupType = ErasureSDSetupType

// Check for cross device mounts if any.
if err = checkCrossDeviceMounts(endpoints); err != nil {

@@ -231,10 +231,10 @@ func TestCreateEndpoints(t *testing.T) {
}{
{"localhost", [][]string{}, "", Endpoints{}, -1, fmt.Errorf("address localhost: missing port in address")},

// FS Setup
// Erasure Single Drive
{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", Endpoints{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
{":443", [][]string{{"/d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil},
{":443", [][]string{{"/d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},

// Erasure Setup with PathEndpointType

@@ -39,8 +39,10 @@ func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket stri
defer NSUpdated(bucket, slashSeparator)

// Verify if bucket is valid.
if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
return BucketNameInvalid{Bucket: bucket}
if !isMinioMetaBucketName(bucket) {
if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
return BucketNameInvalid{Bucket: bucket}
}
}

storageDisks := er.getDisks()
@@ -53,6 +55,11 @@ func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket stri
g.Go(func() error {
if storageDisks[index] != nil {
if err := storageDisks[index].MakeVol(ctx, bucket); err != nil {
if opts.ForceCreate && errors.Is(err, errVolumeExists) {
// No need to return error when force create was
// requested.
return nil
}
if !errors.Is(err, errVolumeExists) {
logger.LogIf(ctx, err)
}

@@ -41,7 +41,7 @@ type Erasure struct {
// NewErasure creates a new ErasureStorage.
func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
// Check the parameters for sanity now.
if dataBlocks <= 0 || parityBlocks <= 0 {
if dataBlocks <= 0 || parityBlocks < 0 {
return e, reedsolomon.ErrInvShardNum
}
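Editor's note: the relaxed sanity check above admits `parityBlocks == 0`, which lines up with the new single-drive erasure setup (`ErasureSDSetupType`) elsewhere in this diff, while non-positive data shard counts remain invalid. The relaxed guard in isolation, as a runnable sketch (this is not the `reedsolomon` package's own validation):

```go
package main

import "fmt"

func validShards(data, parity int) bool { return data > 0 && parity >= 0 }

func main() {
    fmt.Println(validShards(4, 0)) // true: zero parity is now allowed
    fmt.Println(validShards(0, 2)) // false: data shards must be positive
}
```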

@@ -31,7 +31,7 @@ func commonTimeAndOccurence(times []time.Time, group time.Duration) (maxTime tim
groupNano := group.Nanoseconds()
// Ignore the uuid sentinel and count the rest.
for _, t := range times {
if t.Equal(timeSentinel) {
if t.Equal(timeSentinel) || t.IsZero() {
continue
}
nano := t.UnixNano()

@@ -60,7 +60,8 @@ func (fi FileInfo) DataShardFixed() bool {
// also heals the missing entries for bucket metadata files
// `policy.json, notification.xml, listeners.json`.
func (er erasureObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (
result madmin.HealResultItem, err error) {
result madmin.HealResultItem, err error,
) {
if !opts.DryRun {
defer NSUpdated(bucket, slashSeparator)
}
@@ -305,9 +306,13 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
// Re-read when we have lock...
partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, object, versionID, true)
if isAllNotFound(errs) {
err := errFileNotFound
if versionID != "" {
err = errFileVersionNotFound
}
// Nothing to do, file is already gone.
return er.defaultHealResult(FileInfo{}, storageDisks, storageEndpoints,
errs, bucket, object, versionID), nil
errs, bucket, object, versionID), err
}
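Editor's note: the `isAllNotFound` call sites in this file now return a version-aware not-found error instead of `nil`. That selection, factored out as a sketch — `errFileNotFound` and `errFileVersionNotFound` are the repository's existing sentinel errors, as seen in the hunk above:

```go
// Sketch only: mirrors the inline selection in the heal hunks.
func notFoundErr(versionID string) error {
    if versionID != "" {
        return errFileVersionNotFound // a specific version was requested
    }
    return errFileNotFound // the object itself is gone
}
```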

readQuorum, _, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
@@ -326,7 +331,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
// present, it is as good as object not found.
latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
if err != nil {
return result, toObjectErr(err, bucket, object, versionID)
return result, err
}

// List of disks having all parts as per latest metadata.
@@ -396,8 +401,12 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s

if isAllNotFound(errs) {
// File is fully gone, fileInfo is empty.
err := errFileNotFound
if versionID != "" {
err = errFileVersionNotFound
}
return er.defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs,
bucket, object, versionID), nil
bucket, object, versionID), err
}

// If less than read quorum number of disks have all the parts
@@ -417,10 +426,19 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
return result, nil
}

if !latestMeta.XLV1 && !latestMeta.Deleted && disksToHealCount > latestMeta.Erasure.ParityBlocks {
// When disk to heal count is greater than parity blocks we should simply error out.
err := fmt.Errorf("more disks are expected to heal than parity, returned errors: %v (dataErrs %v) -> %s/%s(%s)", errs, dataErrs, bucket, object, versionID)
logger.LogIf(ctx, err)
return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
bucket, object, versionID), err
}

cleanFileInfo := func(fi FileInfo) FileInfo {
// Returns a copy of the 'fi' with checksums and parts nil'ed.
// Returns a copy of the 'fi' with erasure index, checksums and inline data niled.
nfi := fi
if !fi.IsRemote() {
if !nfi.IsRemote() {
nfi.Data = nil
nfi.Erasure.Index = 0
nfi.Erasure.Checksums = nil
}
@@ -431,12 +449,25 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
tmpID := mustGetUUID()
migrateDataDir := mustGetUUID()

// Reorder so that we have data disks first and parity disks next.
latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)

copyPartsMetadata := make([]FileInfo, len(partsMetadata))
for i := range latestDisks {
if latestDisks[i] == nil {
continue
}
copyPartsMetadata[i] = partsMetadata[i]
}

for i := range outDatedDisks {
if outDatedDisks[i] == nil {
continue
}
copyPartsMetadata[i] = partsMetadata[i]
// Make sure to write the FileInfo information
// that is expected to be in quorum.
partsMetadata[i] = cleanFileInfo(latestMeta)
}

@@ -455,12 +486,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
inlineBuffers = make([]*bytes.Buffer, len(outDatedDisks))
}

// Reorder so that we have data disks first and parity disks next.
latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)

if !latestMeta.Deleted && !latestMeta.IsRemote() {
// Heal each part. erasureHealFile() will write the healed
// part to .minio/tmp/uuid/ which needs to be renamed later to
@@ -468,7 +493,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
erasure, err := NewErasure(ctx, latestMeta.Erasure.DataBlocks,
latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
if err != nil {
return result, toObjectErr(err, bucket, object)
return result, err
}

erasureInfo := latestMeta.Erasure
@@ -485,7 +510,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
}
checksumInfo := copyPartsMetadata[i].Erasure.GetChecksumInfo(partNumber)
partPath := pathJoin(object, srcDataDir, fmt.Sprintf("part.%d", partNumber))
readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo,
readers[i] = newBitrotReader(disk, copyPartsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo,
checksumInfo.Hash, erasure.ShardSize())
}
writers := make([]io.Writer, len(outDatedDisks))
@@ -506,7 +531,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
closeBitrotReaders(readers)
closeBitrotWriters(writers)
if err != nil {
return result, toObjectErr(err, bucket, object)
return result, err
}

// outDatedDisks that had write errors should not be
@@ -561,7 +586,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
// Attempt a rename now from healed data to final location.
if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i], bucket, object); err != nil {
logger.LogIf(ctx, err)
return result, toObjectErr(err, bucket, object)
return result, err
}

// Remove any remaining parts from outdated disks from before transition.
@@ -657,10 +682,16 @@ func (er erasureObjects) healObjectDir(ctx context.Context, bucket, object strin
hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
}
}
if dryRun || danglingObject || isAllNotFound(errs) {
if danglingObject || isAllNotFound(errs) {
// Nothing to do, file is already gone.
return hr, errFileNotFound
}

if dryRun {
// Quit without trying to heal the object dir
return hr, nil
}

for i, err := range errs {
if err == errVolumeNotFound || err == errFileNotFound {
// Bucket or prefix/directory not found
@@ -820,8 +851,13 @@ func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object
// Dangling object successfully purged, size is '0'
m.Size = 0
}
// Generate file/version not found with default heal result
err = errFileNotFound
if versionID != "" {
err = errFileVersionNotFound
}
return er.defaultHealResult(m, storageDisks, storageEndpoints,
errs, bucket, object, versionID), nil
errs, bucket, object, versionID), err
}
|
||||
|
||||
// Object is considered dangling/corrupted if any only
|
||||
@@ -891,12 +927,6 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
|
||||
|
||||
// HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true.
|
||||
func (er erasureObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) {
|
||||
defer func() {
|
||||
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
|
||||
// Create context that also contains information about the object and bucket.
|
||||
// The top level handler might not have this information.
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
@@ -910,7 +940,8 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
 
 	// Healing directories handle it separately.
 	if HasSuffix(object, SlashSeparator) {
-		return er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
+		hr, err := er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
+		return hr, toObjectErr(err, bucket, object)
 	}
 
 	storageDisks := er.getDisks()
@@ -925,9 +956,13 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
 	// This allows to quickly check if all is ok or all are missing.
 	_, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false)
 	if isAllNotFound(errs) {
+		err := errFileNotFound
+		if versionID != "" {
+			err = errFileVersionNotFound
+		}
 		// Nothing to do, file is already gone.
 		return er.defaultHealResult(FileInfo{}, storageDisks, storageEndpoints,
-			errs, bucket, object, versionID), nil
+			errs, bucket, object, versionID), toObjectErr(err, bucket, object, versionID)
 	}
 
 	// Heal the object.
@@ -938,5 +973,5 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
 		opts.ScanMode = madmin.HealDeepScan
 		hr, err = er.healObject(healCtx, bucket, object, versionID, opts)
 	}
-	return hr, err
+	return hr, toObjectErr(err, bucket, object, versionID)
 }
 
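Taken together, these hunks move error translation to the outermost layer: the inner healObject and healObjectDir now surface raw storage errors, and HealObject alone converts them with toObjectErr, so API callers see ObjectNotFound rather than nil or an internal sentinel. A simplified sketch of that layering; the types and the wrapper body are stand-ins, not the full MinIO implementation:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-in for MinIO's internal error value.
var errFileNotFound = errors.New("file not found")

// ObjectNotFound mirrors the API-level error type callers see.
type ObjectNotFound struct{ Bucket, Object string }

func (e ObjectNotFound) Error() string {
	return fmt.Sprintf("Object not found: %s/%s", e.Bucket, e.Object)
}

// toObjectErr converts a storage error into an API error, as the
// top-level HealObject now does in one place.
func toObjectErr(err error, bucket, object string) error {
	if errors.Is(err, errFileNotFound) {
		return ObjectNotFound{Bucket: bucket, Object: object}
	}
	return err
}

// healObject plays the role of the inner helper: it returns the raw
// storage error and leaves translation to its caller.
func healObject() error { return errFileNotFound }

func main() {
	err := toObjectErr(healObject(), "bucket", "object")
	fmt.Println(err) // Object not found: bucket/object
}
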
@@ -684,16 +684,18 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 		t.Fatalf("Failed to make a bucket - %v", err)
 	}
 
-	// Create an object with multiple parts uploaded in decreasing
-	// part number.
-	uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
+	// Upload a multipart object in the second pool
+	z := objLayer.(*erasureServerPools)
+	set := z.serverPools[1]
+
+	uploadID, err := set.NewMultipartUpload(ctx, bucket, object, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}
 
 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
+		pInfo, err1 := set.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -703,116 +705,114 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 		})
 	}
 
-	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
+	_, err = set.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to complete multipart upload - %v", err)
 	}
 
 	// Test 1: Remove the object backend files from the first disk.
-	z := objLayer.(*erasureServerPools)
-	for _, set := range z.serverPools {
-		er := set.sets[0]
-		erasureDisks := er.getDisks()
-		firstDisk := erasureDisks[0]
-		err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
-		if err != nil {
-			t.Fatalf("Failed to delete a file - %v", err)
-		}
+	er := set.sets[0]
+	erasureDisks := er.getDisks()
+	firstDisk := erasureDisks[0]
+	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+	if err != nil {
+		t.Fatalf("Failed to delete a file - %v", err)
+	}
 
-		_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
-		if err != nil {
-			t.Fatalf("Failed to heal object - %v", err)
-		}
+	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
+	if err != nil {
+		t.Fatalf("Failed to heal object - %v", err)
+	}
 
-		fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
-		fi, err := getLatestFileInfo(ctx, fileInfos, errs)
-		if errors.Is(err, errFileNotFound) {
-			continue
-		}
-		if err != nil {
-			t.Fatalf("Failed to getLatestFileInfo - %v", err)
-		}
+	fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
+	fi, err := getLatestFileInfo(ctx, fileInfos, errs)
+	if err != nil {
+		t.Fatalf("Failed to getLatestFileInfo - %v", err)
+	}
 
-		if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
-			t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
-		}
+	if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
+		t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
+	}
 
-		err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
-		if err != nil {
-			t.Errorf("Failure during deleting part.1 - %v", err)
-		}
+	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
+	if err != nil {
+		t.Errorf("Failure during deleting part.1 - %v", err)
+	}
 
-		err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
-		if err != nil {
-			t.Errorf("Failure during creating part.1 - %v", err)
-		}
+	err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
+	if err != nil {
+		t.Errorf("Failure during creating part.1 - %v", err)
+	}
 
-		_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
-		if err != nil {
-			t.Errorf("Expected nil but received %v", err)
-		}
+	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
+	if err != nil {
+		t.Errorf("Expected nil but received %v", err)
+	}
 
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
-		nfi, err := getLatestFileInfo(ctx, fileInfos, errs)
-		if err != nil {
-			t.Fatalf("Failed to getLatestFileInfo - %v", err)
-		}
+	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
+	nfi, err := getLatestFileInfo(ctx, fileInfos, errs)
+	if err != nil {
+		t.Fatalf("Failed to getLatestFileInfo - %v", err)
+	}
 
-		if !reflect.DeepEqual(fi, nfi) {
-			t.Fatalf("FileInfo not equal after healing")
-		}
+	fi.DiskMTime = time.Time{}
+	nfi.DiskMTime = time.Time{}
+	if !reflect.DeepEqual(fi, nfi) {
+		t.Fatalf("FileInfo not equal after healing: %v != %v", fi, nfi)
+	}
 
-		err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
-		if err != nil {
-			t.Errorf("Failure during deleting part.1 - %v", err)
-		}
+	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
+	if err != nil {
+		t.Errorf("Failure during deleting part.1 - %v", err)
+	}
 
-		bdata := bytes.Repeat([]byte("b"), int(nfi.Size))
-		err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
-		if err != nil {
-			t.Errorf("Failure during creating part.1 - %v", err)
-		}
+	bdata := bytes.Repeat([]byte("b"), int(nfi.Size))
+	err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
+	if err != nil {
+		t.Errorf("Failure during creating part.1 - %v", err)
+	}
 
-		_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
-		if err != nil {
-			t.Errorf("Expected nil but received %v", err)
-		}
+	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
+	if err != nil {
+		t.Errorf("Expected nil but received %v", err)
+	}
 
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
-		nfi, err = getLatestFileInfo(ctx, fileInfos, errs)
-		if err != nil {
-			t.Fatalf("Failed to getLatestFileInfo - %v", err)
-		}
+	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
+	nfi, err = getLatestFileInfo(ctx, fileInfos, errs)
+	if err != nil {
+		t.Fatalf("Failed to getLatestFileInfo - %v", err)
+	}
 
-		if !reflect.DeepEqual(fi, nfi) {
-			t.Fatalf("FileInfo not equal after healing")
-		}
+	fi.DiskMTime = time.Time{}
+	nfi.DiskMTime = time.Time{}
+	if !reflect.DeepEqual(fi, nfi) {
+		t.Fatalf("FileInfo not equal after healing: %v != %v", fi, nfi)
+	}
 
-		// Test 4: checks if HealObject returns an error when xl.meta is not found
-		// in more than read quorum number of disks, to create a corrupted situation.
-		for i := 0; i <= nfi.Erasure.DataBlocks; i++ {
-			erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
-		}
+	// Test 4: checks if HealObject returns an error when xl.meta is not found
+	// in more than read quorum number of disks, to create a corrupted situation.
+	for i := 0; i <= nfi.Erasure.DataBlocks; i++ {
+		erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+	}
 
-		// Try healing now, expect to receive errFileNotFound.
-		_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
-		if err != nil {
-			if _, ok := err.(ObjectNotFound); !ok {
-				t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
-			}
-		}
+	// Try healing now, expect to receive errFileNotFound.
+	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
+	if err != nil {
+		if _, ok := err.(ObjectNotFound); !ok {
+			t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
+		}
+	}
 
-		// since majority of xl.meta's are not available, object should be successfully deleted.
-		_, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
-		if _, ok := err.(ObjectNotFound); !ok {
-			t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
-		}
+	// since majority of xl.meta's are not available, object should be successfully deleted.
+	_, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
+	if _, ok := err.(ObjectNotFound); !ok {
+		t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
+	}
 
-		for i := 0; i < (nfi.Erasure.DataBlocks + nfi.Erasure.ParityBlocks); i++ {
-			_, err = erasureDisks[i].StatInfoFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
-			if err == nil {
-				t.Errorf("Expected xl.meta file to be not present, but succeeded")
-			}
-		}
-	}
+	for i := 0; i < (nfi.Erasure.DataBlocks + nfi.Erasure.ParityBlocks); i++ {
+		_, err = erasureDisks[i].StatInfoFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+		if err == nil {
+			t.Errorf("Expected xl.meta file to be not present, but succeeded")
+		}
+	}
 }
@@ -904,6 +904,8 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 		t.Fatalf("Failed to getLatestFileInfo - %v", err)
 	}
 
+	fi.DiskMTime = time.Time{}
+	nfi1.DiskMTime = time.Time{}
 	if !reflect.DeepEqual(fi, nfi1) {
 		t.Fatalf("FileInfo not equal after healing")
 	}
@@ -925,6 +927,8 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 		t.Fatalf("Failed to getLatestFileInfo - %v", err)
 	}
 
+	fi.DiskMTime = time.Time{}
+	nfi2.DiskMTime = time.Time{}
 	if !reflect.DeepEqual(fi, nfi2) {
 		t.Fatalf("FileInfo not equal after healing")
 	}
@@ -1194,7 +1198,7 @@ func TestHealObjectErasure(t *testing.T) {
 	// since majority of xl.meta's are not available, object quorum
 	// can't be read properly will be deleted automatically and
 	// err is nil
-	if err != nil {
+	if !isErrObjectNotFound(err) {
 		t.Fatal(err)
 	}
 
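The relaxed assertion matches the new contract verified above: when xl.meta is lost on a quorum of drives, HealObject removes the dangling object and reports ObjectNotFound instead of nil. A small helper a test in the cmd package could use; this is illustrative, with isErrObjectNotFound being MinIO's existing helper:

// expectHealedAway passes when healing either succeeded or reported
// the object as removed; any other error fails the test.
func expectHealedAway(t *testing.T, err error) {
	t.Helper()
	if err != nil && !isErrObjectNotFound(err) {
		t.Fatalf("expected nil or ObjectNotFound, got %v", err)
	}
}
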
@@ -137,24 +137,29 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
 				return errDiskNotFound
 			}
 			metadataArray[index], err = disks[index].ReadVersion(ctx, bucket, object, versionID, readData)
-			if err != nil {
-				if !IsErr(err, []error{
-					errFileNotFound,
-					errVolumeNotFound,
-					errFileVersionNotFound,
-					errDiskNotFound,
-				}...) {
-					logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
-						disks[index], bucket, object, err),
-						disks[index].String())
-				}
-			}
 			return err
 		}, index)
 	}
 
+	errs := g.Wait()
+	for index, err := range errs {
+		if err == nil {
+			continue
+		}
+		if !IsErr(err, []error{
+			errFileNotFound,
+			errVolumeNotFound,
+			errFileVersionNotFound,
+			errDiskNotFound,
+		}...) {
+			logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
+				disks[index], bucket, object, err),
+				disks[index].String())
+		}
+	}
+
 	// Return all the metadata.
-	return metadataArray, g.Wait()
+	return metadataArray, errs
 }
 
 // shuffleDisksAndPartsMetadataByIndex this function should be always used by GetObjectNInfo()
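The restructuring above defers logging until every per-disk read has finished: the goroutines only record their errors, and a single loop afterwards logs anything outside the expected not-found set. A simplified model of that collect-then-filter pattern; the disk names and the worker behaviour are illustrative:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// errFileNotFound stands in for an expected, ignorable error.
var errFileNotFound = errors.New("file not found")

func main() {
	disks := []string{"disk0", "disk1", "disk2"}
	errs := make([]error, len(disks))

	// Fan out one worker per disk, recording each error by index.
	var wg sync.WaitGroup
	for i := range disks {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			switch i {
			case 1:
				errs[i] = errFileNotFound // expected, not worth logging
			case 2:
				errs[i] = errors.New("i/o timeout") // unexpected
			}
		}(i)
	}
	wg.Wait()

	// Log only errors outside the ignored set, after all workers are
	// done, mirroring the moved LogOnceIf loop.
	for i, err := range errs {
		if err == nil || errors.Is(err, errFileNotFound) {
			continue
		}
		fmt.Printf("drive %s returned an unexpected error: %v\n", disks[i], err)
	}
}
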
@@ -178,10 +183,6 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
 			inconsistent++
 			continue
 		}
-		if len(fi.Data) != len(meta.Data) {
-			inconsistent++
-			continue
-		}
 		if meta.XLV1 != fi.XLV1 {
 			inconsistent++
 			continue
@@ -229,12 +230,6 @@ func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo,
 			// if object was ever written previously.
 			continue
 		}
-		if !init && len(fi.Data) != len(partsMetadata[index].Data) {
-			// Check for length of data parts only when
-			// fi.ModTime is not empty - ModTime is always set,
-			// if object was ever written previously.
-			continue
-		}
 		if !init && fi.XLV1 != partsMetadata[index].XLV1 {
 			continue
 		}
 
@@ -19,7 +19,6 @@ package cmd
 
 import (
 	"context"
-	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
 	"net/http"
@@ -28,6 +27,7 @@ import (
 	"time"
 
 	"github.com/minio/minio/internal/bucket/replication"
+	"github.com/minio/minio/internal/hash/sha256"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/sync/errgroup"
@@ -99,15 +99,15 @@ func (fi FileInfo) IsValid() bool {
 		fi.Erasure.Index <= dataBlocks+parityBlocks &&
 		len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
 	return ((dataBlocks >= parityBlocks) &&
-		(dataBlocks != 0) && (parityBlocks != 0) &&
+		(dataBlocks > 0) && (parityBlocks >= 0) &&
 		correctIndexes)
 }
 
 // ToObjectInfo - Converts metadata to object info.
-func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
+func (fi FileInfo) ToObjectInfo(bucket, object string, versioned bool) ObjectInfo {
 	object = decodeDirObject(object)
 	versionID := fi.VersionID
-	if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" {
+	if versioned && versionID == "" {
 		versionID = nullVersionID
 	}
 
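ToObjectInfo now receives the bucket's versioning state as an explicit argument instead of reading the global globalBucketVersioningSys registry, so the decision is made once by the caller and the function stays free of global state. A reduced sketch of the versionID logic under the new signature, with types trimmed to the relevant field; note that nullVersionID is "null" in MinIO:

package main

import "fmt"

// nullVersionID is the version id assigned to unversioned writes.
const nullVersionID = "null"

// FileInfo is trimmed to the single field this sketch needs.
type FileInfo struct{ VersionID string }

// versionIDFor mirrors the ToObjectInfo change: the caller supplies
// the versioning state rather than the function reading a global.
func versionIDFor(fi FileInfo, versioned bool) string {
	if versioned && fi.VersionID == "" {
		return nullVersionID
	}
	return fi.VersionID
}

func main() {
	fmt.Println(versionIDFor(FileInfo{}, true))  // "null"
	fmt.Println(versionIDFor(FileInfo{}, false)) // ""
}
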
@@ -284,7 +284,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn
 
 func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (FileInfo, error) {
 	// with less quorum return error.
-	if quorum < 2 {
+	if quorum < 1 {
 		return FileInfo{}, errErasureReadQuorum
 	}
 	metaHashes := make([]string, len(metaArr))
@@ -297,8 +297,6 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
 			fmt.Fprintf(h, "part.%d", part.Number)
 		}
 		fmt.Fprintf(h, "%v", meta.Erasure.Distribution)
-		// make sure that length of Data is same
-		fmt.Fprintf(h, "%v", len(meta.Data))
 
 		// ILM transition fields
 		fmt.Fprint(h, meta.TransitionStatus)
@@ -400,6 +398,10 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix
 // readQuorum is the min required disks to read data.
 // writeQuorum is the min required disks to write data.
 func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
+	if defaultParityCount == 0 {
+		return 1, 1, nil
+	}
+
 	// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
 	latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
 	if err != nil {
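The new fast path recognizes that with a default parity count of zero (for example, a single-drive setup) there is no redundancy to arbitrate, so both quorums collapse to 1. A hedged model of the quorum arithmetic; the data == parity adjustment follows MinIO's usual rule, but this is a simplification rather than the exact function:

package main

import "fmt"

// quorums models objectQuorumFromMeta's result for a healthy object.
func quorums(dataBlocks, parityBlocks int) (read, write int) {
	// No parity means no redundancy to arbitrate: reads and writes
	// both need just one drive.
	if parityBlocks == 0 {
		return 1, 1
	}
	// Otherwise read quorum is the data-block count, and write
	// quorum gains one when data and parity are balanced.
	read = dataBlocks
	write = dataBlocks
	if dataBlocks == parityBlocks {
		write = dataBlocks + 1
	}
	return read, write
}

func main() {
	fmt.Println(quorums(1, 0)) // 1 1
	fmt.Println(quorums(4, 4)) // 4 5
	fmt.Println(quorums(6, 2)) // 6 6
}
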
Some files were not shown because too many files have changed in this diff.