Compare commits
276 Commits
RELEASE.20...RELEASE.20
| SHA1 |
|---|
| 20960b6a2d |
| 3bd3470d0b |
| ba39ed9af7 |
| 62e6dc950d |
| 5a5046ce45 |
| ad04afe381 |
| ba9f0f2480 |
| d06b63d056 |
| 7ce28c3b1d |
| e3ac4035b9 |
| d21b6daa49 |
| 76ebb16688 |
| 55aa431578 |
| 614981e566 |
| b8b956a05d |
| d2eed44c78 |
| 789cbc6fb2 |
| 0662c90b5c |
| a2cab02554 |
| 6c7a21df6b |
| f933b0b708 |
| 9f305273a7 |
| cbd9efcb43 |
| 29a25a538f |
| 2dd8faaedc |
| f00187033d |
| c5141d65ac |
| 069c4015cd |
| 2f6e03fb60 |
| 0fbb945e13 |
| b94dd835c9 |
| 44fc707423 |
| 5aaef9790f |
| 7edc352d23 |
| 850a84b08a |
| 4148754ce0 |
| 2107722829 |
| d326ba52e9 |
| 91e1487de4 |
| 5ffb2a9605 |
| 17fe91d6d1 |
| 90a9f2dd70 |
| d5e48cfd65 |
| d274566463 |
| 39ac720826 |
| 21b6204692 |
| d98faeb26a |
| 0a63dc199c |
| 3ba857dfa1 |
| a8554c4022 |
| ba54b39c02 |
| 2a75225569 |
| e72429c79c |
| c5b3f5553f |
| d3ae0aaad3 |
| d67bccf861 |
| 1277ad69a6 |
| 8f93e81afb |
| 4af31e654b |
| aad50579ba |
| 38d059b0ae |
| bd4eeb4522 |
| 03e3493288 |
| 64baedf5a4 |
| 2f64d5f77e |
| f79a4ef4d0 |
| 2d53854b19 |
| e5c83535af |
| c904ef966e |
| 8f266e0772 |
| e0fe7cc391 |
| 9d20dec56a |
| 597a785253 |
| 7d75b1e758 |
| 5f78691fcf |
| a591e06ae5 |
| 443c93c634 |
| 5659cddc84 |
| 2a03a34bde |
| 1654a9b7e6 |
| 673a521711 |
| 2e23076688 |
| b92ac55250 |
| 7981509cc8 |
| 6d5bc045bc |
| d38e020b29 |
| 7d29030292 |
| 7c7650b7c3 |
| ca80eced24 |
| d0e0b81d8e |
| 391baa1c9a |
| ae14681c3e |
| 4d698841f4 |
| 9906b3ade9 |
| bf1769d3e0 |
| 63e1ad9f29 |
| 2c7bcee53f |
| 1fd90c93ff |
| e947a844c9 |
| 4e2d39293a |
| 1228d6bf1a |
| fc4561c64c |
| 3b7747b42b |
| e432e79324 |
| 08d74819b6 |
| aa3fde1784 |
| 0b3eb7f218 |
| 69c9496c71 |
| b792b36495 |
| d3db7d31a3 |
| c05ca63158 |
| 6d3e0c7db6 |
| 0e59e50b39 |
| d4b391de1b |
| de4d3dac00 |
| 534e7161df |
| 9b219cd646 |
| 3bab4822f3 |
| 3c5f2d8916 |
| 5808190398 |
| b2a82248b1 |
| 4e5fcca8b9 |
| c36eaedb93 |
| 7752b03add |
| 01bfc78535 |
| 074d70112d |
| e8d14c0d90 |
| 60d7e8143a |
| 9667a170de |
| abae30f9e1 |
| f9311bc9d1 |
| b598402738 |
| bd026b913f |
| 72ff69d9bb |
| f30417d9a8 |
| 47a4ad3cd7 |
| 2f7a10ab31 |
| b534dc69ab |
| 7b7d2ea7d4 |
| e00de1c302 |
| 3549e583a6 |
| f5e3eedf34 |
| 519dbfebf6 |
| 9a267f9270 |
| 67bd71b7a5 |
| ec49fff583 |
| 8b660e18f2 |
| 981497799a |
| b9bdc17465 |
| b413ff9fdb |
| 6a15580817 |
| 39633a5581 |
| 1e83f15e2f |
| 888d2bb1d8 |
| 847ee5ac45 |
| 9a9a49aa84 |
| a03ca80269 |
| 523bd769f1 |
| 8ff70ea5a9 |
| da3e7747ca |
| 4afb59e63f |
| 1526e7ece3 |
| 6c07bfee8a |
| 446c760820 |
| 04f92f1291 |
| 4a60a7794d |
| e5b16adb1c |
| 402a3ac719 |
| f3d61c51fc |
| 0cde17ae5d |
| 8c1bba681b |
| dbfb5e797b |
| 08ff702434 |
| 0e2148264a |
| a75f42344b |
| 7926401cbd |
| 8161411c5d |
| f64dea2aac |
| 6579304d8c |
| 6bb10a81a6 |
| 3cf8a7c888 |
| 2e38bb5175 |
| a372c6a377 |
| 93b2f8a0c5 |
| 1a6568a25d |
| 9e95703efc |
| d8e05aca81 |
| 410a1ac040 |
| 4caa3422bd |
| a658b976f5 |
| 135874ebdc |
| f4f1c42cba |
| e7aa26dc29 |
| c54ffde568 |
| 9a3c992d7a |
| 0c855638de |
| 943d815783 |
| 4c0acba62d |
| 62c3cdee75 |
| 3212d0c8cd |
| 1d03bea965 |
| fbfeb59658 |
| 701da1282a |
| df93ff92ba |
| 77d5331e85 |
| 14cdadfb56 |
| f3a52cc195 |
| 7640cd24c9 |
| f7b665347e |
| 9693c382a8 |
| ee1047bd52 |
| 5ea5ab162b |
| b5a09ff96b |
| 95c65f4e8f |
| 6bfff7532e |
| 1aa8896ad6 |
| 3e32ceb39f |
| ca1350b092 |
| 9205434ed3 |
| cd50e9b4bc |
| ec816f3840 |
| 5f774951b1 |
| 2ca9befd2a |
| 72f5cb577e |
| 928c0181bf |
| 03767d26da |
| 108e6f92d4 |
| d653a59fc0 |
| 01bfdf949a |
| 98f7821eb3 |
| 2d3898e0d5 |
| ae46ce9937 |
| dfc112c06b |
| ca5fab8656 |
| 6df76ca73c |
| f65dd3e5a2 |
| a8d601b64a |
| 73b4794cf7 |
| e2709ea129 |
| 740ec80819 |
| d95e054282 |
| 7c1f9667d1 |
| 9246990496 |
| 0cf3d93360 |
| cb06aee5ac |
| 1c70e9ed1b |
| f3d6a2dd37 |
| d1c58fc2eb |
| b8f05b1471 |
| e7baf78ee8 |
| 87299eba10 |
| d3a07c29ba |
| 8d39b715dc |
| 7e3166475d |
| 5206c0e883 |
| 41ec038523 |
| 08d3d06a06 |
| 074febd9e1 |
| aa8d25797b |
| 8d7d4adb91 |
| ffa91f9794 |
| 0c31e61343 |
| 9b926f7dbe |
| 35d8728990 |
| f7ed9a75ba |
| 9496c17e13 |
| ed64e91f06 |
| a481825ae1 |
| 7bb0f32332 |
| c6f8dc431e |
| 78f177b8ee |
| 787c44c39d |
| f06fee0364 |
| c957e0d426 |
| 04101d472f |
| 51fc145161 |
11  .github/workflows/go-cross.yml  vendored
@@ -3,12 +3,11 @@ name: Crosscompile
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true

@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
-      - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
5  .github/workflows/go-fips.yml  vendored
@@ -3,8 +3,7 @@ name: FIPS Build Test
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
5  .github/workflows/go-healing.yml  vendored
@@ -3,8 +3,7 @@ name: Healing Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
14  .github/workflows/go-lint.yml  vendored
@@ -3,12 +3,11 @@ name: Linters and Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true

@@ -21,8 +20,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
-        os: [ubuntu-latest, windows-latest]
+        go-version: [1.22.x]
+        os: [ubuntu-latest, Windows]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
@@ -30,14 +29,15 @@ jobs:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - name: Build on ${{ matrix.os }}
-        if: matrix.os == 'windows-latest'
+        if: matrix.os == 'Windows'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
        run: |
          Set-MpPreference -DisableRealtimeMonitoring $true
+          netsh int ipv4 set dynamicport tcp start=60000 num=61000
          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
-          go test -v --timeout 50m ./...
+          go test -v --timeout 120m ./...
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
5  .github/workflows/go.yml  vendored
@@ -3,8 +3,7 @@ name: Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
5  .github/workflows/helm-lint.yml  vendored
@@ -3,8 +3,7 @@ name: Helm Chart linting
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -23,7 +22,7 @@ jobs:
        uses: actions/checkout@v4

      - name: Install Helm
-        uses: azure/setup-helm@v3
+        uses: azure/setup-helm@v4

      - name: Run helm lint
        run: |
11  .github/workflows/iam-integrations.yaml  vendored
@@ -3,8 +3,7 @@ name: IAM integration
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -62,7 +61,7 @@ jobs:
      # are turned off - i.e. if ldap="", then ldap server is not enabled for
      # the tests.
      matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
        ldap: ["", "localhost:389"]
        etcd: ["", "http://localhost:2379"]
        openid: ["", "http://127.0.0.1:5556/dex"]
@@ -112,6 +111,12 @@ jobs:
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          go run docs/iam/access-manager-plugin.go &
          make test-iam
+      - name: Test MinIO Old Version data to IAM import current version
+        if: matrix.ldap == 'ldaphost:389'
+        env:
+          _MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
+        run: |
+          make test-iam-ldap-upgrade-import
      - name: Test LDAP for automatic site replication
        if: matrix.ldap == 'localhost:389'
        run: |
8  .github/workflows/mint.yml  vendored
@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - next

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -30,7 +29,7 @@ jobs:
      - name: setup-go-step
        uses: actions/setup-go@v5
        with:
-          go-version: 1.21.x
+          go-version: 1.22.x

      - name: github sha short
        id: vars
@@ -56,6 +55,11 @@ jobs:
        run: |
          ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"

+      # FIXME: renable this back when we have a valid way to add deadlines for PUT()s (internode CreateFile)
+      # - name: resiliency
+      #   run: |
+      #     ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
+
      - name: The job must cleanup
        if: ${{ always() }}
        run: |
78  .github/workflows/mint/minio-resiliency.yaml  vendored  Normal file
@@ -0,0 +1,78 @@
+version: '3.7'
+
+# Settings and configurations that are common for all containers
+x-minio-common: &minio-common
+  image: quay.io/minio/minio:${JOB_NAME}
+  command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
+  expose:
+    - "9000"
+    - "9001"
+  environment:
+    MINIO_CI_CD: "on"
+    MINIO_ROOT_USER: "minio"
+    MINIO_ROOT_PASSWORD: "minio123"
+    MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
+    MINIO_DRIVE_MAX_TIMEOUT: "5s"
+  healthcheck:
+    test: ["CMD", "mc", "ready", "local"]
+    interval: 5s
+    timeout: 5s
+    retries: 5
+
+# starts 4 docker containers running minio server instances.
+# using nginx reverse proxy, load balancing, you can access
+# it through port 9000.
+services:
+  minio1:
+    <<: *minio-common
+    hostname: minio1
+    volumes:
+      - rdata1-1:/rdata1
+      - rdata1-2:/rdata2
+
+  minio2:
+    <<: *minio-common
+    hostname: minio2
+    volumes:
+      - rdata2-1:/rdata1
+      - rdata2-2:/rdata2
+
+  minio3:
+    <<: *minio-common
+    hostname: minio3
+    volumes:
+      - rdata3-1:/rdata1
+      - rdata3-2:/rdata2
+
+  minio4:
+    <<: *minio-common
+    hostname: minio4
+    volumes:
+      - rdata4-1:/rdata1
+      - rdata4-2:/rdata2
+
+  nginx:
+    image: nginx:1.19.2-alpine
+    hostname: nginx
+    volumes:
+      - ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
+    ports:
+      - "9000:9000"
+      - "9001:9001"
+    depends_on:
+      - minio1
+      - minio2
+      - minio3
+      - minio4
+
+## By default this config uses default local driver,
+## For custom volumes replace with volume driver configuration.
+volumes:
+  rdata1-1:
+  rdata1-2:
+  rdata2-1:
+  rdata2-2:
+  rdata3-1:
+  rdata3-2:
+  rdata4-1:
+  rdata4-2:
7  .github/workflows/mint/nginx-4-node.conf  vendored
@@ -23,10 +23,9 @@ http {
     # include /etc/nginx/conf.d/*.conf;

    upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
    }

    upstream console {
16  .github/workflows/mint/nginx-8-node.conf  vendored
@@ -23,14 +23,14 @@ http {
     # include /etc/nginx/conf.d/*.conf;

    upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
-        server minio5:9000;
-        server minio6:9000;
-        server minio7:9000;
-        server minio8:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
+        server minio5:9000 max_fails=1 fail_timeout=10s;
+        server minio6:9000 max_fails=1 fail_timeout=10s;
+        server minio7:9000 max_fails=1 fail_timeout=10s;
+        server minio8:9000 max_fails=1 fail_timeout=10s;
    }

    upstream console {
8  .github/workflows/mint/nginx.conf  vendored
@@ -23,10 +23,10 @@ http {
     # include /etc/nginx/conf.d/*.conf;

    upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
    }

    upstream console {
56  .github/workflows/multipart/migrate.sh  vendored
@@ -24,11 +24,6 @@ if [ ! -f ./mc ]; then
 	chmod +x mc
 fi

-(
-	cd ./docs/debugging/s3-check-md5
-	go install -v
-)
-
 export RELEASE=RELEASE.2023-08-29T23-07-35Z

 docker-compose -f docker-compose-site1.yaml up -d
@@ -48,10 +43,10 @@ sleep 30s

 sleep 5

-s3-check-md5 -h
+./s3-check-md5 -h

-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"
@@ -67,8 +62,8 @@ fi

 sleep 5

-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

 ## we do not need to fail here, since we are going to test
 ## upgrading to master, healing and being able to recover
@@ -96,8 +91,8 @@ for i in $(seq 1 10); do
 	./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
 done

-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"
@@ -109,6 +104,43 @@ if [ $failed_count_site2 -ne 0 ]; then
 	exit 1
 fi

+# Add user group test
+./mc admin user add site1 site-replication-issue-user site-replication-issue-password
+./mc admin group add site1 site-replication-issue-group site-replication-issue-user
+
+max_wait_attempts=30
+wait_interval=5
+
+attempt=1
+while true; do
+	diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)
+
+	if [[ $? -eq 0 ]]; then
+		echo "Outputs are consistent."
+		break
+	fi
+
+	remaining_attempts=$((max_wait_attempts - attempt))
+	if ((attempt >= max_wait_attempts)); then
+		echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
+		exit 1
+	else
+		echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
+		sleep $wait_interval
+	fi
+
+	((attempt++))
+done
+
+status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')
+
+if [[ $status == "enabled" ]]; then
+	echo "Success"
+else
+	echo "Expected status: enabled, actual status: $status"
+	exit 1
+fi
+
 cleanup

 ## change working directory
17  .github/workflows/replication.yaml  vendored
@@ -3,8 +3,7 @@ name: MinIO advanced tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -22,7 +21,7 @@ jobs:

    strategy:
      matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]

    steps:
      - uses: actions/checkout@v4
@@ -36,6 +35,18 @@ jobs:
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-decom

+      - name: Test ILM
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-ilm
+
+      - name: Test PBAC
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-pbac
+
      - name: Test Config File
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
5  .github/workflows/root-disable.yml  vendored
@@ -3,8 +3,7 @@ name: Root lockdown tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]

     steps:
17  .github/workflows/run-mint.sh  vendored
@@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 cd .github/workflows/mint

 docker-compose -f minio-${MODE}.yaml up -d
-sleep 30s
+sleep 1m

 docker system prune -f || true
 docker volume prune -f || true
@@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6

+# Pause one node, to check that all S3 calls work while one node goes wrong
+[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
+
 docker run --rm --net=mint_default \
 	--name="mint-${MODE}-${JOB_NAME}" \
 	-e SERVER_ENDPOINT="nginx:9000" \
@@ -35,6 +38,18 @@ docker run --rm --net=mint_default \
 	-e MINT_MODE="${MINT_MODE}" \
 	docker.io/minio/mint:edge

+# FIXME: enable this after fixing aws-sdk-java-v2 tests
+# # unpause the node, to check that all S3 calls work while one node goes wrong
+# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
+# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
+#	--name="mint-${MODE}-${JOB_NAME}" \
+#	-e SERVER_ENDPOINT="nginx:9000" \
+#	-e ACCESS_KEY="${ACCESS_KEY}" \
+#	-e SECRET_KEY="${SECRET_KEY}" \
+#	-e ENABLE_HTTPS=0 \
+#	-e MINT_MODE="${MINT_MODE}" \
+#	docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml down || true
 sleep 10s
3  .github/workflows/shfmt.yml  vendored
@@ -3,8 +3,7 @@ name: Shell formatting checks
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 permissions:
   contents: read
2  .github/workflows/typos.yml  vendored
@@ -1,5 +1,5 @@
 ---
-name: Test GitHub Action
+name: Spelling
 on: [pull_request]

 jobs:
5  .github/workflows/upgrade-ci-cd.yaml  vendored
@@ -3,8 +3,7 @@ name: Upgrade old version tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]

     steps:
4  .github/workflows/vulncheck.yml  vendored
@@ -21,11 +21,11 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
-          go-version: 1.21.9
+          go-version: 1.22.4
          check-latest: true
      - name: Get official govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
        shell: bash
      - name: Run govulncheck
-        run: govulncheck ./...
+        run: govulncheck -show verbose ./...
        shell: bash
11  .gitignore  vendored
@@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
 docs/debugging/pprofgoparser/pprofgoparser
 docs/debugging/reorder-disks/reorder-disks
 docs/debugging/populate-hard-links/populate-hardlinks
 docs/debugging/xattr/xattr
+hash-set
+healing-bin
+inspect
+pprofgoparser
+reorder-disks
+s3-check-md5
+s3-verify
+xattr
+xl-meta
30  .typos.toml
@@ -12,33 +12,27 @@ extend-ignore-re = [
  "[0-9A-Za-z/+=]{64}",
  "ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
  "eyJmb28iOiJiYXIifQ",
  "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
  "MIIDBTCCAe2gAwIBAgIQWHw7h.*",
  'http\.Header\{"X-Amz-Server-Side-Encryptio":',
  'sessionToken',
  "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
]

[default.extend-words]
"encrypter" = "encrypter"
"requestor" = "requestor"
"KMS" = "KMS"
"kms" = "kms"
"Kms" = "Kms"
"Dur" = "Dur"
"EOF" = "EOF"
"hd" = "hd"
"ws" = "ws"
"guid" = "guid"
"lst" = "lst"
"pn" = "pn"
"Iy" = "Iy"
"ro" = "ro"
"thr" = "thr"

[default.extend-identifiers]
"bui" = "bui"
"toi" = "toi"
"ot" = "ot"
"dm2nd" = "dm2nd"
"HashiCorp" = "HashiCorp"

[type.go.extend-identifiers]
"bui" = "bui"
"dm2nd" = "dm2nd"
"ot" = "ot"
"ParseND" = "ParseND"
"ParseNDStream" = "ParseNDStream"
"pn" = "pn"
"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
"thr" = "thr"
"toi" = "toi"
54  Makefile
@@ -45,20 +45,29 @@ lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
 	@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix

 check: test
-test: verifiers build build-debugging ## builds minio, runs linters, tests
+test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./...
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./...

 test-root-disable: install-race
 	@echo "Running minio root lockdown tests"
 	@env bash $(PWD)/buildscripts/disable-root.sh

+test-ilm: install-race
+	@echo "Running ILM tests"
+	@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh
+
+test-pbac: install-race
+	@echo "Running bucket policies tests"
+	@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
+
 test-decom: install-race
 	@echo "Running minio decom tests"
 	@env bash $(PWD)/docs/distributed/decom.sh
 	@env bash $(PWD)/docs/distributed/decom-encrypted.sh
 	@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
 	@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
+	@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh

 test-versioning: install-race
 	@echo "Running minio versioning tests"
@@ -75,11 +84,15 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)

-test-iam: build ## verify IAM (external IDP, etcd backends)
+test-iam: install-race ## verify IAM (external IDP, etcd backends)
 	@echo "Running tests for IAM (external IDP, etcd backends)"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue,dev -v -run TestIAM* ./cmd
 	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue,dev -v -run TestIAM* ./cmd

+test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
+	@echo "Running upgrade tests for IAM (LDAP backend)"
+	@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh
+
 test-sio-error:
 	@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@@ -93,7 +106,10 @@ test-replication-3site:
 test-delete-replication:
 	@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)

-test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
+test-delete-marker-proxying:
+	@(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh)
+
+test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication
 	@echo "Running tests for replicating three sites"

 test-site-replication-ldap: install-race ## verify automatic site replication
@@ -114,36 +130,32 @@ test-site-replication-minio: install-race ## verify automatic site replication
 	@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
 	@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)

-verify: ## verify minio various setups
+verify: install-race ## verify minio various setups
 	@echo "Verifying build with race"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)

-verify-healing: ## verify healing and replacing disks with minio binary
+verify-healing: install-race ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
+	@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
 	@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)

-verify-healing-with-root-disks: ## verify healing root disks
+verify-healing-with-root-disks: install-race ## verify healing root disks
 	@echo "Verify healing with root drives"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)

-verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
+verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
 	@echo "Verify healing with rewrite"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)

-verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
+verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
 	@echo "Verify resolving inconsistent versions build with race"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)

 build-debugging:
 	@(env bash $(PWD)/docs/debugging/build.sh)

-build: checks ## builds minio to $(PWD)
+build: checks build-debugging ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
 	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@@ -182,15 +194,15 @@ docker: build ## builds minio docker container
 	@echo "Building minio docker image '$(TAG)'"
 	@docker build -q --no-cache -t $(TAG) . -f Dockerfile

-install-race: checks ## builds minio to $(PWD)
+install-race: checks build-debugging ## builds minio to $(PWD)
 	@echo "Building minio binary with -race to './minio'"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio

 install: build ## builds minio and installs it to $GOPATH/bin.
 	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
 	@echo "Installation successful. To learn more, try \"minio --help\"."

 clean: ## cleanup all generated assets
@@ -210,10 +210,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h

 For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.

 Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.

 For example: `export MINIO_SERVER_URL="https://minio.example.net"`

-| Dashboard | Creating a bucket |
-| ------------- | ------------- |
-|  |  |
-
0  buildscripts/checkdeps.sh  Normal file → Executable file
@@ -32,6 +32,7 @@
 set +e

 export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
+./mc ready minioadm

 ./mc ls minioadm/

@@ -56,7 +57,7 @@ done

 set +e

-sleep 10
+./mc ready minioadm/

 ./mc ls minioadm/
 if [ $? -ne 0 ]; then
@@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
 minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
 	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

-sleep 20s
-
 export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
 export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004

+./mc ready sitea
+./mc ready siteb
+
 ./mc admin replicate add sitea siteb

 ./mc admin user add sitea foobar foo12345
@@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
 minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
 	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

-sleep 20s
-
 export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
 export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004

+./mc ready sitea
+./mc ready siteb
+
 ./mc admin user add sitea foobar-admin foo12345

 sleep 2s
127  buildscripts/minio-iam-ldap-upgrade-import-test.sh  Executable file
@@ -0,0 +1,127 @@
+#!/bin/bash
+
+# This script is used to test the migration of IAM content from old minio
+# instance to new minio instance.
+#
+# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing
+# repo (e.g. make podman-run), and then run this script.
+#
+# This script assumes that LDAP server is at:
+#
+#   `localhost:1389`
+#
+# if this is not the case, set the environment variable
+# `_MINIO_LDAP_TEST_SERVER`.
+
+OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
+OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}
+
+__init__() {
+	if which curl &>/dev/null; then
+		echo "curl is already installed"
+	else
+		echo "Installing curl:"
+		sudo apt install curl -y
+	fi
+
+	export GOPATH=/tmp/gopath
+	export PATH="${PATH}":"${GOPATH}"/bin
+
+	if which mc &>/dev/null; then
+		echo "mc is already installed"
+	else
+		echo "Installing mc:"
+		go install github.com/minio/mc@latest
+	fi
+
+	if [ ! -x ./minio.${OLD_VERSION} ]; then
+		echo "Downloading minio.${OLD_VERSION} binary"
+		curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
+		chmod +x minio.${OLD_VERSION}
+	fi
+
+	if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
+		export _MINIO_LDAP_TEST_SERVER=localhost:1389
+		echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
+	fi
+
+	rm -rf /tmp/data
+}
+
+create_iam_content_in_old_minio() {
+	echo "Creating IAM content in old minio instance."
+
+	MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
+	sleep 5
+
+	set -x
+	mc alias set old-minio http://localhost:9000 minioadmin minioadmin
+	mc ready old-minio
+	mc idp ldap add old-minio \
+		server_addr=localhost:1389 \
+		server_insecure=on \
+		lookup_bind_dn=cn=admin,dc=min,dc=io \
+		lookup_bind_password=admin \
+		user_dn_search_base_dn=dc=min,dc=io \
+		user_dn_search_filter="(uid=%s)" \
+		group_search_base_dn=ou=swengg,dc=min,dc=io \
+		group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
+	mc admin service restart old-minio
+
+	mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
+	mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io
+
+	mc idp ldap policy entities old-minio
+
+	mc admin cluster iam export old-minio
+	set +x
+
+	mc admin service stop old-minio
+}
+
+import_iam_content_in_new_minio() {
+	echo "Importing IAM content in new minio instance."
+	# Assume current minio binary exists.
+	MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
+	sleep 5
+
+	set -x
+	mc alias set new-minio http://localhost:9000 minioadmin minioadmin
+	echo "BEFORE IMPORT mappings:"
+	mc ready new-minio
+	mc idp ldap policy entities new-minio
+	mc admin cluster iam import new-minio ./old-minio-iam-info.zip
+	echo "AFTER IMPORT mappings:"
+	mc idp ldap policy entities new-minio
+	set +x
+
+	# mc admin service stop new-minio
+}
+
+verify_iam_content_in_new_minio() {
+	output=$(mc idp ldap policy entities new-minio --json)
+
+	groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
+	if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
+		echo "Failed to verify groups: $groups"
+		exit 1
+	fi
+
+	users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
+	if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
+		echo "Failed to verify users: $users"
+		exit 1
+	fi
+
+	mc admin service stop new-minio
+}
+
+main() {
+	create_iam_content_in_old_minio
+
+	import_iam_content_in_new_minio
+
+	verify_iam_content_in_new_minio
+}
+
+(__init__ "$@" && main "$@")
22  buildscripts/minio-upgrade.sh  Normal file → Executable file
@@ -4,10 +4,22 @@ trap 'cleanup $LINENO' ERR

 # shellcheck disable=SC2120
 cleanup() {
-	MINIO_VERSION=dev docker-compose \
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
 		-f "buildscripts/upgrade-tests/compose.yml" \
-		rm -s -f
+		down || true
+
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
+		-f "buildscripts/upgrade-tests/compose.yml" \
+		rm || true
+
+	for volume in $(docker volume ls -q | grep upgrade); do
+		docker volume rm ${volume} || true
+	done
+
-	docker volume prune -f
+	docker system prune -f || true
+	docker volume prune -f || true
+	docker volume rm $(docker volume ls -q -f dangling=true) || true
 }

 verify_checksum_after_heal() {
@@ -60,6 +72,8 @@ __init__() {
 	go install github.com/docker/compose/v2/cmd@latest
 	mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose

+	cleanup
+
 	TAG=minio/minio:dev make docker

 	MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
@@ -77,11 +91,11 @@ __init__() {

 	curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

-	MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
 }

 main() {
-	MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

 	add_alias

@@ -45,7 +45,8 @@ function verify_rewrite() {
 	"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
 	pid=$!
 	disown $pid
-	sleep 10
+
+	"${WORK_DIR}/mc" ready minio/

 	if ! ps -p ${pid} 1>&2 >/dev/null; then
 		echo "server1 log:"
@@ -77,7 +78,8 @@ function verify_rewrite() {
 	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
 	pid=$!
 	disown $pid
-	sleep 10
+
+	"${WORK_DIR}/mc" ready minio/

 	if ! ps -p ${pid} 1>&2 >/dev/null; then
 		echo "server1 log:"
@@ -87,17 +89,12 @@ function verify_rewrite() {
 		exit 1
 	fi

-	(
-		cd ./docs/debugging/s3-check-md5
-		go install -v
-	)
-
-	if ! s3-check-md5 \
+	if ! ./s3-check-md5 \
 		-debug \
 		-versions \
 		-access-key minio \
 		-secret-key minio123 \
-		-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
+		-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
 		echo "server1 log:"
 		cat "${WORK_DIR}/server1.log"
 		echo "FAILED"
@@ -117,7 +114,7 @@ function verify_rewrite() {
 	go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
 	sleep 1

-	if ! s3-check-md5 \
+	if ! ./s3-check-md5 \
 		-debug \
 		-versions \
 		-access-key minio \
@@ -1,5 +1,3 @@
-version: '3.7'
-
 # Settings and configurations that are common for all containers
 x-minio-common: &minio-common
   image: minio/minio:${MINIO_VERSION}
@@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
 export MINT_MODE=core
 export MINT_DATA_DIR="$WORK_DIR/data"
 export SERVER_ENDPOINT="127.0.0.1:9000"
+export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
+export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
 export ACCESS_KEY="minio"
 export SECRET_KEY="minio123"
 export ENABLE_HTTPS=0
 export GO111MODULE=on
 export GOGC=25
 export ENABLE_ADMIN=1

 export MINIO_CI_CD=1

 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
@@ -36,18 +37,21 @@ function start_minio_fs() {
 	export MINIO_ROOT_USER=$ACCESS_KEY
 	export MINIO_ROOT_PASSWORD=$SECRET_KEY
 	"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
-	sleep 10
+
+	"${WORK_DIR}/mc" ready verify
 }

 function start_minio_erasure() {
 	"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
-	sleep 15
+
+	"${WORK_DIR}/mc" ready verify
 }

 function start_minio_erasure_sets() {
 	export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
 	"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
-	sleep 15
+
+	"${WORK_DIR}/mc" ready verify
 }

 function start_minio_pool_erasure_sets() {
@@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
 	"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
 	"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &

-	sleep 40
+	"${WORK_DIR}/mc" ready verify
 }

 function start_minio_pool_erasure_sets_ipv6() {
@@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
 	"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
 	"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

-	sleep 40
+	"${WORK_DIR}/mc" ready verify_ipv6
 }

 function start_minio_dist_erasure() {
@@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
 		"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
 	done

-	sleep 40
+	"${WORK_DIR}/mc" ready verify
 }

 function run_test_fs() {
@@ -222,7 +226,7 @@ function __init__() {
 		exit 1
 	fi

-	(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
+	(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")

 	# remove mc source.
 	purge "${MC_BUILD_DIR}"
166  buildscripts/verify-healing-empty-erasure-set.sh  Executable file
@@ -0,0 +1,166 @@
+#!/bin/bash -e
+#
+
+set -E
+set -o pipefail
+
+if [ ! -x "$PWD/minio" ]; then
+	echo "minio executable binary not found in current directory"
+	exit 1
+fi
+
+WORK_DIR="$PWD/.verify-$RANDOM"
+MINIO_CONFIG_DIR="$WORK_DIR/.minio"
+MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
+
+function start_minio_3_node() {
+	export MINIO_ROOT_USER=minio
+	export MINIO_ROOT_PASSWORD=minio123
+	export MINIO_ERASURE_SET_DRIVE_COUNT=6
+	export MINIO_CI_CD=1
+
+	start_port=$1
+	args=""
+	for i in $(seq 1 3); do
+		args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
+	done
+
+	"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
+	pid1=$!
+	disown ${pid1}
+
+	"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
+	pid2=$!
+	disown $pid2
+
+	"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
+	pid3=$!
+	disown $pid3
+
+	export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
+
+	/tmp/mc ready myminio
+
+	# Wait for all drives to be online and formatted
+	while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
+	# Wait for all drives to be healed
+	while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done
+
+	# Wait for Status: in MinIO output
+	while true; do
+		rv=$(check_online)
+		if [ "$rv" != "1" ]; then
+			# success
+			break
+		fi
+
+		# Check if we should retry
+		retry=$((retry + 1))
+		if [ $retry -le 20 ]; then
+			sleep 5
+			continue
+		fi
+
+		# Failure
+		for i in $(seq 1 3); do
+			echo "server$i log:"
+			cat "${WORK_DIR}/dist-minio-server$i.log"
+		done
+		pkill -9 minio
+		echo "FAILED"
+		purge "$WORK_DIR"
+		exit 1
+	done
+
+	if ! ps -p $pid1 1>&2 >/dev/null; then
+		echo "server1 log:"
+		cat "${WORK_DIR}/dist-minio-server1.log"
+		echo "FAILED"
+		purge "$WORK_DIR"
+		exit 1
+	fi
+
+	if ! ps -p $pid2 1>&2 >/dev/null; then
+		echo "server2 log:"
+		cat "${WORK_DIR}/dist-minio-server2.log"
+		echo "FAILED"
+		purge "$WORK_DIR"
+		exit 1
+	fi
+
+	if ! ps -p $pid3 1>&2 >/dev/null; then
+		echo "server3 log:"
+		cat "${WORK_DIR}/dist-minio-server3.log"
+		echo "FAILED"
+		purge "$WORK_DIR"
+		exit 1
+	fi
+
+	if ! pkill minio; then
+		for i in $(seq 1 3); do
+			echo "server$i log:"
+			cat "${WORK_DIR}/dist-minio-server$i.log"
+		done
+		echo "FAILED"
+		purge "$WORK_DIR"
+		exit 1
+	fi
+
+	sleep 1
+	if pgrep minio; then
+		# forcibly killing, to proceed further properly.
+		if ! pkill -9 minio; then
+			echo "no minio process running anymore, proceed."
+		fi
+	fi
+}
+
+function check_online() {
+	if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
+		echo "1"
+	fi
+}
+
+function purge() {
+	echo rm -rf "$1"
+}
+
+function __init__() {
+	echo "Initializing environment"
+	mkdir -p "$WORK_DIR"
+	mkdir -p "$MINIO_CONFIG_DIR"
+
+	## version is purposefully set to '3' for minio to migrate configuration file
+	echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
+
+	if [ ! -f /tmp/mc ]; then
+		wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
+			chmod +x /tmp/mc
+	fi
+}
+
+function perform_test() {
+	start_minio_3_node $2
+
+	echo "Testing Distributed Erasure setup healing of drives"
+	echo "Remove the contents of the disks belonging to '${1}' erasure set"
+
+	rm -rf ${WORK_DIR}/${1}/*/
+
+	set -x
+	start_minio_3_node $2
+}
+
+function main() {
+	# use same ports for all tests
+	start_port=$(shuf -i 10000-65000 -n 1)
+
+	perform_test "2" ${start_port}
+	perform_test "1" ${start_port}
+	perform_test "3" ${start_port}
+}
+
+(__init__ "$@" && main "$@")
+rv=$?
+purge "$WORK_DIR"
+exit "$rv"
@@ -12,17 +12,26 @@ fi
 WORK_DIR="$PWD/.verify-$RANDOM"
 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
+GOPATH=/tmp/gopath

 function start_minio_3_node() {
+	for i in $(seq 1 3); do
+		rm "${WORK_DIR}/dist-minio-server$i.log"
+	done
+
 	export MINIO_ROOT_USER=minio
 	export MINIO_ROOT_PASSWORD=minio123
 	export MINIO_ERASURE_SET_DRIVE_COUNT=6
 	export MINIO_CI_CD=1

-	start_port=$2
+	first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)
+
+	start_port=$1
 	args=""
-	for i in $(seq 1 3); do
-		args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
-	done
+	for d in $(seq 1 3 5); do
+		args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
+		d=$((d + 1))
+		args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
+	done

 	"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
@@ -37,7 +46,11 @@ function start_minio_3_node() {
 	pid3=$!
 	disown $pid3

-	sleep "$1"
+	export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
+	/tmp/mc ready myminio
+
+	[ ${first_time} -eq 0 ] && upload_objects
+	[ ${first_time} -ne 0 ] && sleep 120

 	if ! ps -p $pid1 1>&2 >/dev/null; then
 		echo "server1 log:"
@@ -82,10 +95,23 @@ function start_minio_3_node() {
 	fi
 }

-function check_online() {
+function check_heal() {
 	if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
-		echo "1"
+		return 1
 	fi
+
+	for ((i = 0; i < 20; i++)); do
+		test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
+		v1=$?
+		nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
+		foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
+		foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
+		test $foundFiles1 -eq $foundFiles2
+		v2=$?
+		[ $v1 == 0 -a $v2 == 0 ] && return 0
+		sleep 10
+	done
+	return 1
 }

 function purge() {
@@ -99,20 +125,35 @@ function __init__() {

 	## version is purposefully set to '3' for minio to migrate configuration file
 	echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
+
+	if [ ! -f /tmp/mc ]; then
+		wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
+			chmod +x /tmp/mc
+	fi
+}
+
+function upload_objects() {
+	/tmp/mc mb myminio/testbucket/
+	for ((i = 0; i < 20; i++)); do
+		echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
+	done
 }

 function perform_test() {
-	start_minio_3_node 120 $2
+	start_port=$2
+
+	start_minio_3_node $start_port

 	echo "Testing Distributed Erasure setup healing of drives"
-	echo "Remove the contents of the disks belonging to '${1}' erasure set"
+	echo "Remove the contents of the disks belonging to '${1}' node"

 	rm -rf ${WORK_DIR}/${1}/*/

 	set -x
-	start_minio_3_node 120 $2
+	start_minio_3_node $start_port

-	rv=$(check_online)
+	check_heal ${1}
+	rv=$?
 	if [ "$rv" == "1" ]; then
 		for i in $(seq 1 3); do
 			echo "server$i log:"
@@ -25,7 +25,7 @@ import (
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// Data types used for returning dummy access control
|
||||
|
||||
@@ -40,7 +40,7 @@ import (
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -735,7 +735,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
|
||||
if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
@@ -783,7 +783,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
}
|
||||
switch fileName {
|
||||
case bucketNotificationConfig:
|
||||
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
|
||||
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
@@ -837,9 +837,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
rcfg, err := globalBucketObjectLockSys.Get(bucket)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
// Validate the received bucket policy document
|
||||
if err = bucketLifecycle.Validate(); err != nil {
|
||||
if err = bucketLifecycle.Validate(rcfg); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
@@ -874,8 +878,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
}
|
||||
kmsKey := encConfig.KeyID()
|
||||
if kmsKey != "" {
|
||||
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
|
||||
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
|
||||
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
|
||||
Name: kmsKey,
|
||||
AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
|
||||
})
|
||||
if err != nil {
|
||||
if errors.Is(err, kes.ErrKeyNotFound) {
|
||||
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// validateAdminReq will validate request against and return whether it is allowed.
|
||||
|
||||
@@ -37,7 +37,7 @@ import (
|
||||
"github.com/minio/minio/internal/config/subnet"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
|
||||
|
||||
@@ -32,8 +32,8 @@ import (
|
||||
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
|
||||
"github.com/minio/minio/internal/config/identity/openid"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/ldap"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/ldap"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
|
||||
|
||||
@@ -20,6 +20,7 @@ package cmd
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -27,7 +28,8 @@ import (
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
xldap "github.com/minio/pkg/v3/ldap"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
|
||||
@@ -222,9 +224,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
||||
err error
|
||||
)
|
||||
|
||||
// If we are creating svc account for request sender, ensure
|
||||
// that targetUser is a real user (i.e. not derived
|
||||
// credentials).
|
||||
// If we are creating svc account for request sender, ensure that targetUser
|
||||
// is a real user (i.e. not derived credentials).
|
||||
if isSvcAccForRequestor {
|
||||
if requestorIsDerivedCredential {
|
||||
if requestorParentUser == "" {
|
||||
@@ -237,12 +238,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
||||
targetGroups = requestorGroups
|
||||
|
||||
// Deny if the target user is not LDAP
|
||||
foundLDAPDN, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
|
||||
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if foundLDAPDN == "" {
|
||||
if foundResult == nil {
|
||||
err := errors.New("Specified user does not exist on LDAP server")
|
||||
APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
|
||||
writeErrorResponseJSON(ctx, w, APIErr, r.URL)
|
||||
@@ -265,7 +266,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
||||
|
||||
isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
|
||||
opts.claims[ldapUserN] = targetUser // simple username
|
||||
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
|
||||
var lookupResult *xldap.DNSearchResult
|
||||
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
|
||||
if err != nil {
|
||||
// if not found, check if DN
|
||||
if strings.Contains(err.Error(), "User DN not found for:") {
|
||||
@@ -279,7 +281,26 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
targetUser = lookupResult.NormDN
|
||||
opts.claims[ldapUser] = targetUser // DN
|
||||
opts.claims[ldapActualUser] = lookupResult.ActualDN
|
||||
|
||||
// Check if this user or their groups have a policy applied.
|
||||
ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if len(ldapPolicies) == 0 {
|
||||
err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`"))
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Add LDAP attributes that were looked up into the claims.
|
||||
for attribKey, attribValue := range lookupResult.Attributes {
|
||||
opts.claims[ldapAttribPrefix+attribKey] = attribValue
|
||||
}
|
||||
}
|
||||
|
||||
newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
|
||||
@@ -386,15 +407,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
}
|
||||
|
||||
targetAccount, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
|
||||
dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if targetAccount == "" {
|
||||
if dnResult == nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
|
||||
return
|
||||
}
|
||||
targetAccount := dnResult.NormDN
|
||||
|
||||
listType := r.Form.Get("listType")
|
||||
if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
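
The hunks above replace bare DN strings with a richer lookup result that must be nil-checked before its normalized DN is used. A minimal, self-contained sketch of that pattern follows; dnSearchResult and getValidatedDN are illustrative stand-ins for the xldap types, not the actual API.

package main

import (
    "errors"
    "fmt"
)

// dnSearchResult is an illustrative stand-in for xldap.DNSearchResult: the
// lookup now returns a struct (or nil) instead of a bare DN string.
type dnSearchResult struct {
    NormDN   string // normalized DN, used as the canonical account name
    ActualDN string // DN exactly as stored in the directory
}

func getValidatedDN(username string) (*dnSearchResult, error) {
    // Stubbed lookup: a real implementation would query the LDAP server.
    if username == "" {
        return nil, nil // not found
    }
    return &dnSearchResult{NormDN: "uid=" + username + ",dc=example,dc=org"}, nil
}

func resolveAccount(username string) (string, error) {
    res, err := getValidatedDN(username)
    if err != nil {
        return "", err
    }
    if res == nil { // replaces the old `targetAccount == ""` check
        return "", errors.New("no such user")
    }
    return res.NormDN, nil
}

func main() {
    dn, err := resolveAccount("alice")
    fmt.Println(dn, err) // uid=alice,dc=example,dc=org <nil>
}
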
@@ -27,8 +27,8 @@ import (
"strings"

"github.com/minio/mux"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
)

var (

@@ -33,7 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
@@ -347,6 +347,18 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
var replicateILMExpiry bool
for _, site := range info.Sites {
if site.ReplicateILMExpiry {
replicateILMExpiry = true
break
}
}
if !replicateILMExpiry {
// explicitly send nil for ILMExpiryStats
info.ILMExpiryStats = nil
}

if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -32,7 +32,7 @@ import (

"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/sync/errgroup"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {

@@ -29,6 +29,7 @@ import (
"sort"
"strconv"
"time"
"unicode/utf8"

"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
@@ -36,7 +37,8 @@ import (
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
"github.com/puzpuzpuz/xsync/v3"
)

@@ -271,7 +273,14 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
}
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)

if globalIAMSys.LDAPConfig.Enabled() {
// We don't allow internal group manipulation in this API when LDAP
// is enabled for now.
err = errIAMActionNotAllowed
} else {
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
}
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -466,6 +475,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}

if !utf8.ValidString(accessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL)
return
}

checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
@@ -507,6 +521,12 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}

// We don't allow internal user creation with LDAP enabled for now.
if globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}

updatedAt, err := globalIAMSys.CreateUser(ctx, accessKey, ureq)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -687,12 +707,20 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // username DN
opts.claims[ldapActualUser] = lookupResult.ActualDN

// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}

// NOTE: if not using LDAP, then internal IDP or open ID is
// being used - in the former, group info is enforced when
@@ -802,7 +830,11 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
}

condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(updateReq.NewExpiration, condValues)
err = addExpirationToCondValues(updateReq.NewExpiration, condValues)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Permission checks:
//
@@ -1026,8 +1058,13 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
Description: svc.Description,
ParentUser: svc.ParentUser,
Name: svc.Name,
AccountStatus: svc.Status,
AccessKey: svc.AccessKey,
ImpliedPolicy: svc.IsImpliedPolicy(),
Expiration: &expiryTime,
})
}

@@ -1199,9 +1236,9 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}

bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (DataUsageInfo, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
cachevalue.Opts{ReturnLastGood: true},
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()

return loadDataUsageFromBackend(ctx, objectAPI)
@@ -1556,7 +1593,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}))
}

// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
// SetPolicyForUserOrGroup - sets a policy on a user or a group.
//
// PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
//
// Deprecated: This API is replaced by attach/detach policy APIs for specific
// type of users (builtin or LDAP).
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -1608,6 +1650,32 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
userType := regUser
if globalIAMSys.GetUsersSysType() == LDAPUsersSysType {
userType = stsUser

// Validate that the user or group exists in LDAP and use the normalized
// form of the entityName (which will be an LDAP DN).
var err error
if isGroup {
var foundGroupDN *xldap.DNSearchResult
var underBaseDN bool
if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil {
iamLogIf(ctx, err)
} else if foundGroupDN == nil || !underBaseDN {
err = errNoSuchGroup
}
entityName = foundGroupDN.NormDN
} else {
var foundUserDN *xldap.DNSearchResult
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
iamLogIf(ctx, err)
} else if foundUserDN == nil {
err = errNoSuchUser
}
entityName = foundUserDN.NormDN
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

updatedAt, err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, userType, isGroup)
@@ -1763,9 +1831,20 @@ const (
userPolicyMappingsFile = "user_mappings.json"
groupPolicyMappingsFile = "group_mappings.json"
stsUserPolicyMappingsFile = "stsuser_mappings.json"
iamAssetsDir = "iam-assets"

iamAssetsDir = "iam-assets"
)

var iamExportFiles = []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
}

// ExportIAMHandler - exports all iam info as a zipped file
func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -1804,16 +1883,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return nil
}

iamFiles := []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
}
for _, f := range iamFiles {
for _, f := range iamExportFiles {
iamFile := pathJoin(iamAssetsDir, f)
switch f {
case allPoliciesFile:
@@ -1898,7 +1968,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
sa, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@@ -1920,6 +1990,9 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
Status: acc.Credentials.Status,
Name: sa.Name,
Description: sa.Description,
Expiration: &sa.Expiration,
}
}

@@ -2152,12 +2225,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// If group does not exist, then check if the group has beginning and end space characters
// we will reject such group names.
if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(group) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, gerr, allGroupsFile, group), r.URL)
return
}
}
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
return
}
}
@@ -2184,6 +2257,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, allSvcAcctsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
return
}
}

for user, svcAcctReq := range serviceAcctReqs {
var sp *policy.Policy
var err error
@@ -2194,7 +2277,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
}
// service account access key cannot have space characters beginning and end of the string.
// service account access key cannot have space characters
// beginning and end of the string.
if hasSpaceBE(svcAcctReq.AccessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
@@ -2220,20 +2304,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
updateReq = false
}
if updateReq {
opts := updateServiceAccountOpts{
secretKey: svcAcctReq.SecretKey,
status: svcAcctReq.Status,
name: svcAcctReq.Name,
description: svcAcctReq.Description,
expiration: svcAcctReq.Expiration,
sessionPolicy: sp,
}
_, err = globalIAMSys.UpdateServiceAccount(ctx, svcAcctReq.AccessKey, opts)
// If the service account exists, we remove it to ensure a
// clean import.
err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
return
}
continue
}
opts := newServiceAccountOpts{
accessKey: user,
@@ -2246,18 +2324,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
allowSiteReplicatorAccount: false,
}

// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
if globalIAMSys.LDAPConfig.Enabled() {
opts.claims[ldapUserN] = svcAcctReq.AccessKey // simple username
targetUser, _, err := globalIAMSys.LDAPConfig.LookupUserDN(svcAcctReq.AccessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
}
opts.claims[ldapUser] = targetUser // username DN
}

if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
@@ -2326,6 +2392,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, groupPolicyMappingsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
}
}

for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
@@ -2355,6 +2432,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)
@@ -2366,6 +2453,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importError(ctx, errIAMActionNotAllowed, stsUserPolicyMappingsFile, u), r.URL)
return
}

if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
return
@@ -2375,11 +2463,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}
}

func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) {
if exp == nil {
return
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(exp.Sub(time.Now()).Seconds()), 10)}
dur := exp.Sub(time.Now())
if dur <= 0 {
return errors.New("unsupported expiration time")
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
return nil
}
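
The rewritten helper above turns an absolute expiry into the policy engine's "DurationSeconds" condition and now reports an error for expirations already in the past instead of silently emitting a negative duration. A minimal runnable sketch of the same computation, with illustrative names outside the actual package (the real helper also treats a sentinel time as unset):

package main

import (
    "errors"
    "fmt"
    "strconv"
    "time"
)

func expirationToCondValues(exp *time.Time, condValues map[string][]string) error {
    if exp == nil || exp.IsZero() {
        return nil // no expiration requested, nothing to add
    }
    dur := time.Until(*exp)
    if dur <= 0 {
        return errors.New("unsupported expiration time")
    }
    condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
    return nil
}

func main() {
    cond := map[string][]string{}
    exp := time.Now().Add(2 * time.Hour)
    if err := expirationToCondValues(&exp, cond); err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(cond["DurationSeconds"]) // roughly ["7200"]
}
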
func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
@@ -2407,6 +2500,12 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}

if createReq.Expiration != nil && !createReq.Expiration.IsZero() {
// truncate expiration at the second.
truncateTime := createReq.Expiration.Truncate(time.Second)
createReq.Expiration = &truncateTime
}

// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)
@@ -2438,7 +2537,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
}

condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(createReq.Expiration, condValues)
err = addExpirationToCondValues(createReq.Expiration, condValues)
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}

// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self

@@ -39,7 +39,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (

@@ -36,6 +36,7 @@ import (
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
@@ -49,6 +50,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/handlers"
@@ -57,9 +59,9 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
"github.com/zeebo/xxh3"
)
@@ -1429,7 +1431,7 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi

if globalIsDistErasure {
// Get heal status from other peers
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus(ctx)
var errCount int
for _, nerr := range nerrs {
if nerr.Err != nil {
@@ -1575,7 +1577,8 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)

// NetperfHandler - perform mesh style network throughput test
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()

objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
@@ -1596,6 +1599,15 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
ctx = lkctx.Context()
defer nsLock.Unlock(lkctx)

// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()

durationStr := r.Form.Get(peerRESTDuration)
duration, err := time.ParseDuration(durationStr)
if err != nil {
@@ -1616,18 +1628,73 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
}
}

func isAllowedRWAccess(r *http.Request, cred auth.Credentials, bucketName string) (rd, wr bool) {
owner := cred.AccessKey == globalActiveCred.AccessKey

// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")

// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)

isAllowedAccess := func(bucketName string) (rd, wr bool) {
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.GetObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
rd = true
}

if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
wr = true
}

return rd, wr
}
return isAllowedAccess(bucketName)
}

// ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()

objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
objectAPI, creds := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
return
}

if !globalAPIConfig.permitRootAccess() {
rd, wr := isAllowedRWAccess(r, creds, globalObjectPerfBucket)
if !rd || !wr {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
Code: "XMinioSpeedtestInsufficientPermissions",
Message: fmt.Sprintf("%s does not have read and write access to '%s' bucket", creds.AccessKey,
globalObjectPerfBucket),
StatusCode: http.StatusForbidden,
}), r.URL)
return
}
}

sizeStr := r.Form.Get(peerRESTSize)
durationStr := r.Form.Get(peerRESTDuration)
concurrentStr := r.Form.Get(peerRESTConcurrent)
@@ -1636,6 +1703,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
autotune := r.Form.Get("autotune") == "true"
noClear := r.Form.Get("noclear") == "true"
enableSha256 := r.Form.Get("enableSha256") == "true"
enableMultipart := r.Form.Get("enableMultipart") == "true"

size, err := strconv.Atoi(sizeStr)
if err != nil {
@@ -1691,8 +1759,11 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
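
Both speedtest paths now derive a cancelable context and park the unfreeze in a goroutine waiting on ctx.Done(), so S3 traffic resumes as soon as the client disconnects rather than only when the handler returns. A small self-contained sketch of that pattern, with freeze standing in for globalNotificationSys.ServiceFreeze:

package main

import (
    "context"
    "fmt"
    "time"
)

// freeze toggles acceptance of incoming calls; it stands in for
// globalNotificationSys.ServiceFreeze in the handlers above.
func freeze(on bool) { fmt.Println("frozen:", on) }

func runWithFreeze(parent context.Context, work func(context.Context)) {
    ctx, cancel := context.WithCancel(parent)
    defer cancel() // canceling fires the unfreeze below even on early return

    freeze(true)
    go func() {
        <-ctx.Done() // fires on cancel or client disconnect
        freeze(false)
    }()

    work(ctx)
}

func main() {
    runWithFreeze(context.Background(), func(ctx context.Context) {
        time.Sleep(10 * time.Millisecond) // pretend to run the speedtest
    })
    time.Sleep(10 * time.Millisecond) // give the goroutine time to unfreeze
}
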
@@ -1706,6 +1777,8 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
storageClass: storageClass,
bucketName: customBucket,
enableSha256: enableSha256,
enableMultipart: enableMultipart,
creds: creds,
})
var prevResult madmin.SpeedTestResult
for {
@@ -1792,7 +1865,8 @@ func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo,

// DriveSpeedtestHandler - reports throughput of drives available in the cluster
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()

objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
@@ -1802,8 +1876,11 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()

serial := r.Form.Get("serial") == "true"
blockSizeStr := r.Form.Get("blocksize")
@@ -2096,7 +2173,9 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
return
}

if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{
Name: r.Form.Get("key-id"),
}); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -2117,22 +2196,12 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques
return
}

stat, err := GlobalKMS.Stat(ctx)
stat, err := GlobalKMS.Status(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}

status := madmin.KMSStatus{
Name: stat.Name,
DefaultKeyID: stat.DefaultKey,
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
}
for _, endpoint := range stat.Endpoints {
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
}

resp, err := json.Marshal(status)
resp, err := json.Marshal(stat)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
@@ -2154,15 +2223,9 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
return
}

stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}

keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
keyID = GlobalKMS.DefaultKey
}
response := madmin.KMSKeyStatus{
KeyID: keyID,
@@ -2170,7 +2233,10 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req

kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsContext,
})
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -2183,7 +2249,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}

// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: key.KeyID,
Ciphertext: key.Ciphertext,
AssociatedData: kmsContext,
})
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -2277,7 +2347,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
notifyTarget := fetchLambdaInfo()

local := getLocalServerProperty(globalEndpoints, r, metrics)
servers := globalNotificationSys.ServerInfo(metrics)
servers := globalNotificationSys.ServerInfo(ctx, metrics)
servers = append(servers, local)

var poolsInfo map[int]map[int]madmin.ErasureSetInfo
@@ -2336,8 +2406,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma

domain := globalDomainNames
services := madmin.Services{
KMS: fetchKMSStatus(),
KMSStatus: fetchKMSStatusV2(ctx),
KMSStatus: fetchKMSStatus(ctx),
LDAP: ldap,
Logger: log,
Audit: audit,
@@ -2347,7 +2416,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalSite.Region,
Region: globalSite.Region(),
SQSARN: globalEventNotifier.GetARNList(false),
DeploymentID: globalDeploymentID(),
Buckets: buckets,
@@ -2375,7 +2444,7 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
}

client := &http.Client{
Transport: globalHealthChkTransport,
Transport: globalRemoteTargetTransport,
Timeout: 10 * time.Second,
}

@@ -2947,66 +3016,25 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
return notify
}

// fetchKMSStatus fetches KMS-related status information.
func fetchKMSStatus() madmin.KMS {
kmsStat := madmin.KMS{}
if GlobalKMS == nil {
kmsStat.Status = "disabled"
return kmsStat
}

stat, err := GlobalKMS.Stat(context.Background())
if err != nil {
kmsStat.Status = string(madmin.ItemOffline)
return kmsStat
}
if len(stat.Endpoints) == 0 {
kmsStat.Status = stat.Name
return kmsStat
}
kmsStat.Status = string(madmin.ItemOnline)

kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "success"
}

// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
return kmsStat
}

// fetchKMSStatusV2 fetches KMS-related status information for all instances
func fetchKMSStatusV2(ctx context.Context) []madmin.KMS {
// fetchKMSStatus fetches KMS-related status information for all instances
func fetchKMSStatus(ctx context.Context) []madmin.KMS {
if GlobalKMS == nil {
return []madmin.KMS{}
}

results := GlobalKMS.Verify(ctx)

stats := []madmin.KMS{}
for _, result := range results {
stats = append(stats, madmin.KMS{
Status: result.Status,
Endpoint: result.Endpoint,
Encrypt: result.Encrypt,
Decrypt: result.Decrypt,
Version: result.Version,
})
stat, err := GlobalKMS.Status(ctx)
if err != nil {
kmsLogIf(ctx, err, "failed to fetch KMS status information")
return []madmin.KMS{}
}

stats := make([]madmin.KMS, 0, len(stat.Endpoints))
for endpoint, state := range stat.Endpoints {
stats = append(stats, madmin.KMS{
Status: string(state),
Endpoint: endpoint,
})
}
return stats
}

@@ -3172,11 +3200,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
file = strings.ReplaceAll(file, string(os.PathSeparator), "/")

file = filepath.ToSlash(file)
// Reject attempts to traverse parent or absolute paths.
if strings.Contains(file, "..") || strings.Contains(volume, "..") {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
if hasBadPathComponent(volume) || hasBadPathComponent(file) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
return
}

@@ -3195,6 +3223,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return
}
}
addErr := func(msg string) {}

// Write a version for making *incompatible* changes.
// The AdminClient will reject any version it does not know.
@@ -3234,6 +3263,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
bugLogIf(ctx, stream.AddError(err.Error()))
return
}
addErr = func(msg string) {
inspectZipW.Close()
encStream.Close()
stream.AddError(msg)
}
defer encStream.Close()

inspectZipW = zip.NewWriter(encStream)
@@ -3314,18 +3348,6 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}
return nil
}
err := o.GetRawData(ctx, volume, file, rawDataFn)
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}

// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}

// save args passed to inspect command
var sb bytes.Buffer
@@ -3338,6 +3360,24 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
sb.WriteString("\n")
adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))

err := o.GetRawData(ctx, volume, file, rawDataFn)
if err != nil {
if errors.Is(err, errFileNotFound) {
addErr("GetRawData: No files matched the given pattern")
return
}
embedFileInZip(inspectZipW, "GetRawData-err.txt", []byte(err.Error()), 0o600)
adminLogIf(ctx, err)
}

// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}

scheme := "https"
if !globalIsTLS {
scheme = "http"

@@ -63,8 +63,8 @@ const (
)

var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")

errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
@@ -455,8 +455,8 @@ type healSequence struct {
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64

// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64
// Number of total items where healing failed against item type
healFailedItemsMap map[madmin.HealItemType]int64

// The time of the last scan/heal activity
lastHealActivity time.Time
@@ -497,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
healFailedItemsMap: make(map[madmin.HealItemType]int64),
}
}

@@ -543,12 +543,12 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {

// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) getHealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
@@ -556,29 +556,27 @@ func (h *healSequence) getHealFailedItemsMap() map[string]int64 {
return retMap
}

func (h *healSequence) countFailed(res madmin.HealResultItem) {
func (h *healSequence) countFailed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

for _, d := range res.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}

h.healFailedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

func (h *healSequence) countHeals(healType madmin.HealItemType, healed bool) {
func (h *healSequence) countScanned(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

if !healed {
h.scannedItemsMap[healType]++
} else {
h.healedItemsMap[healType]++
}
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

func (h *healSequence) countHealed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

h.healedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
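
After this refactor, scanned, healed, and failed counts are all keyed by madmin.HealItemType and guarded by the sequence mutex. A compact sketch of the resulting bookkeeping; the three count methods are collapsed into a single record method here for brevity, and the types are illustrative:

package main

import (
    "fmt"
    "sync"
)

type healItemType string

// healCounters mirrors the refactor above: scanned/healed/failed are all
// keyed by item type now, instead of failures being keyed by drive endpoint.
type healCounters struct {
    mu      sync.Mutex
    scanned map[healItemType]int64
    healed  map[healItemType]int64
    failed  map[healItemType]int64
}

func newHealCounters() *healCounters {
    return &healCounters{
        scanned: make(map[healItemType]int64),
        healed:  make(map[healItemType]int64),
        failed:  make(map[healItemType]int64),
    }
}

// record counts one queued task and its outcome under the same mutex.
func (c *healCounters) record(t healItemType, err error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.scanned[t]++
    if err == nil {
        c.healed[t]++
    } else {
        c.failed[t]++
    }
}

func main() {
    c := newHealCounters()
    c.record("object", nil)
    c.record("object", fmt.Errorf("drive offline"))
    fmt.Println(c.scanned["object"], c.healed["object"], c.failed["object"]) // 2 1 1
}
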
@@ -734,7 +732,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
task.opts.ScanMode = madmin.HealNormalScan
}

h.countHeals(healType, false)
h.countScanned(healType)

if source.noWait {
select {
@@ -766,6 +764,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
// task queued, now wait for the response.
select {
case res := <-task.respCh:
if res.err == nil {
h.countHealed(healType)
} else {
h.countFailed(healType)
}
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -18,7 +18,6 @@
package cmd

import (
"context"
"math"
"net/http"
"os"
@@ -31,6 +30,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
xnet "github.com/minio/pkg/v3/net"
)

// getLocalServerProperty - returns madmin.ServerProperties for only the
@@ -64,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
peersLogOnceIf(context.Background(), err, nodeName)
if xnet.IsNetworkOrHostDown(err, false) {
network[nodeName] = string(madmin.ItemOffline)
} else if xnet.IsNetworkOrHostDown(err, true) {
network[nodeName] = "connection attempt timedout"
}
}
}
}

@@ -67,7 +67,7 @@ type ObjectToDelete struct {
ReplicateDecisionStr string `xml:"-"`
}

// createBucketConfiguration container for bucket configuration request from client.
// createBucketLocationConfiguration container for bucket configuration request from client.
// Used for parsing the location from the request body for Makebucket.
type createBucketLocationConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`

@@ -48,7 +48,7 @@ import (
levent "github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// APIError structure
@@ -56,19 +56,23 @@ type APIError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize string
RangeRequested string
}

// APIErrorResponse - error response format
type APIErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"`
RangeRequested string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"`
}
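
The widened APIErrorResponse gains ActualObjectSize and RangeRequested with omitempty tags, so they are serialized only for responses that set them. A small runnable sketch of that marshaling behavior on a trimmed stand-in type:

package main

import (
    "encoding/xml"
    "fmt"
)

// errorResponse is a trimmed stand-in for APIErrorResponse above; the new
// ActualObjectSize and RangeRequested fields only appear when set.
type errorResponse struct {
    XMLName          xml.Name `xml:"Error"`
    Code             string
    Message          string
    ActualObjectSize string `xml:"ActualObjectSize,omitempty"`
    RangeRequested   string `xml:"RangeRequested,omitempty"`
}

func main() {
    out, _ := xml.MarshalIndent(errorResponse{
        Code:           "InvalidRange",
        Message:        "The requested range is not satisfiable",
        RangeRequested: "bytes=100-200",
    }, "", "  ")
    fmt.Println(string(out)) // ActualObjectSize is omitted because it is empty
}
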
|
||||
|
||||
// APIErrorCode type of error status.
|
||||
@@ -263,6 +267,7 @@ const (
|
||||
ErrInvalidResourceName
|
||||
ErrInvalidLifecycleQueryParameter
|
||||
ErrServerNotInitialized
|
||||
ErrBucketMetadataNotInitialized
|
||||
ErrRequestTimedout
|
||||
ErrClientDisconnected
|
||||
ErrTooManyRequests
|
||||
@@ -282,6 +287,7 @@ const (
|
||||
ErrAdminNoSuchGroup
|
||||
ErrAdminGroupNotEmpty
|
||||
ErrAdminGroupDisabled
|
||||
ErrAdminInvalidGroupName
|
||||
ErrAdminNoSuchJob
|
||||
ErrAdminNoSuchPolicy
|
||||
ErrAdminPolicyChangeAlreadyApplied
|
||||
@@ -420,6 +426,7 @@ const (
|
||||
ErrAdminProfilerNotEnabled
|
||||
ErrInvalidDecompressedSize
|
||||
ErrAddUserInvalidArgument
|
||||
ErrAddUserValidUTF
|
||||
ErrAdminResourceInvalidArgument
|
||||
ErrAdminAccountNotEligible
|
||||
ErrAccountNotEligible
|
||||
@@ -438,6 +445,8 @@ const (
|
||||
ErrAdminNoAccessKey
|
||||
ErrAdminNoSecretKey
|
||||
|
||||
ErrIAMNotInitialized
|
||||
|
||||
apiErrCodeEnd // This is used only for the testing code
|
||||
)
|
||||
|
||||
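ErrIAMNotInitialized and the other new codes slot into the enum while apiErrCodeEnd stays last, so tests can still range over every defined code. A sketch of the iteration the sentinel enables, using only names declared in this file (ErrNone is the zero APIErrorCode):

    for code := ErrNone + 1; code < apiErrCodeEnd; code++ {
        apiErr := errorCodes.ToAPIErr(code)
        _ = apiErr // e.g. assert apiErr.Code and apiErr.HTTPStatusCode are populated
    }
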
@@ -451,9 +460,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalSite.Region != "" {
if region := globalSite.Region(); region != "" {
if errCode == ErrAuthorizationHeaderMalformed {
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
return apiErr
}
}
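Throughout this series globalSite.Region changes from a struct field to a method call. A plausible reason is that the site configuration can now be reloaded at runtime, so reads go through a lock-guarded accessor; a minimal sketch of the pattern, with the type name and lock layout assumed rather than taken from this diff:

    type Site struct {
        mu     sync.RWMutex
        region string
    }

    // Region returns the configured region under a read lock, so a concurrent
    // config reload cannot race with request handlers reading it.
    func (s *Site) Region() string {
        s.mu.RLock()
        defer s.mu.RUnlock()
        return s.region
    }
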
@@ -960,7 +969,7 @@ var errorCodes = errorCodeMap{
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service connection error",
HTTPStatusCode: http.StatusNotFound,
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrReplicationBandwidthLimitError: {
Code: "XMinioAdminReplicationBandwidthLimitError",
@@ -1295,7 +1304,17 @@ var errorCodes = errorCodeMap{
},
ErrServerNotInitialized: {
Code: "XMinioServerNotInitialized",
Description: "Server not initialized, please try again.",
Description: "Server not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrIAMNotInitialized: {
Code: "XMinioIAMNotInitialized",
Description: "IAM sub-system not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrBucketMetadataNotInitialized: {
Code: "XMinioBucketMetadataNotInitialized",
Description: "Bucket metadata not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrMalformedJSON: {
@@ -1468,7 +1487,7 @@ var errorCodes = errorCodeMap{
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
HTTPStatusCode: http.StatusTooManyRequests,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
@@ -2091,6 +2110,16 @@ var errorCodes = errorCodeMap{
Description: "Expected LDAP short username but was given full DN.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidGroupName: {
Code: "XMinioInvalidGroupName",
Description: "The group name is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAddUserValidUTF: {
Code: "XMinioInvalidUTF",
Description: "Invalid UTF-8 character detected.",
HTTPStatusCode: http.StatusBadRequest,
},
}

// toAPIErrorCode - Converts embedded errors. Convenience
@@ -2130,6 +2159,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
apiErr = ErrAdminGroupNotEmpty
case errGroupNameContainsReservedChars:
apiErr = ErrAdminInvalidGroupName
case errNoSuchJob:
apiErr = ErrAdminNoSuchJob
case errNoPolicyToAttachOrDetach:
@@ -2144,6 +2175,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooSmall
case errAuthentication:
apiErr = ErrAccessDenied
case auth.ErrContainsReservedChars:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidAccessKeyLength:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidSecretKeyLength:
@@ -2211,6 +2244,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrInvalidMaxParts
case ioutil.ErrOverread:
apiErr = ErrExcessData
case errServerNotInitialized:
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
}

// Compression errors
@@ -2402,10 +2439,9 @@ func toAPIError(ctx context.Context, err error) APIError {
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
switch apiErr.Code {
case "NotImplemented":
desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
Description: fmt.Sprintf("%s (%v)", apiErr.Description, err),
HTTPStatusCode: apiErr.HTTPStatusCode,
}
case "XMinioBackendDown":
@@ -2417,12 +2453,24 @@ func toAPIError(ctx context.Context, err error) APIError {
switch e := err.(type) {
case kms.Error:
apiErr = APIError{
Description: e.Err.Error(),
Code: e.APICode,
HTTPStatusCode: e.HTTPStatusCode,
Description: e.Err,
HTTPStatusCode: e.Code,
}
case batchReplicationJobError:
apiErr = APIError(e)
apiErr = APIError{
Description: e.Description,
Code: e.Code,
HTTPStatusCode: e.HTTPStatusCode,
}
case InvalidRange:
apiErr = APIError{
Code: "InvalidRange",
Description: e.Error(),
HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode,
ObjectSize: strconv.FormatInt(e.ResourceSize, 10),
RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd),
}
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",
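The batchReplicationJobError case switches from a direct conversion to a field-by-field copy. Go only permits APIError(e) when both structs have identical underlying field sets, and this series adds an ObjectSize int64 field to batchReplicationJobError (see its struct later in the diff), which breaks that convertibility. A self-contained illustration of the rule:

    type A struct{ X, Y string }
    type B struct{ X, Y string }
    type C struct {
        X, Y string
        Z    int64 // extra field
    }

    var b B
    _ = A(b) // ok: identical field names, types, and order
    var c C
    _ = A(c) // compile error: cannot convert c (type C) to type A
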
@@ -2549,13 +2597,15 @@ func getAPIError(code APIErrorCode) APIError {
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)
return APIErrorResponse{
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region,
RequestID: requestID,
HostID: hostID,
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region(),
RequestID: requestID,
HostID: hostID,
ActualObjectSize: err.ObjectSize,
RangeRequested: err.RangeRequested,
}
}

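With the two optional fields threaded through, a range error can now echo the object's actual size and the requested range in the error body. An illustrative literal (all values made up) matching the APIErrorResponse struct defined earlier:

    resp := APIErrorResponse{
        Code:             "InvalidRange",
        Message:          "The requested range is not satisfiable",
        Resource:         "/mybucket/myobject",
        RequestID:        "REQUEST-ID",
        HostID:           "HOST-ID",
        ActualObjectSize: "1024",      // from APIError.ObjectSize
        RangeRequested:   "2048-4095", // from APIError.RangeRequested
    }
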
@@ -19,6 +19,7 @@ package cmd

import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -53,7 +54,7 @@ func setCommonHeaders(w http.ResponseWriter) {

// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalSite.Region; region != "" {
if region := globalSite.Region(); region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -107,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)

@@ -135,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags.Count() > 0 {
if tags != nil && tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is MinIO only extension to return back tags along with the count.
@@ -212,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.IsRemote() {
// Check if object is being restored. For more information on x-amz-restore header see
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
}

if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {

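The tag-count hunk adds a nil check because the error from ParseObjectTags is deliberately discarded: on malformed user tags the returned pointer can be nil, and calling Count() on it would panic the handler. A sketch of the guarded form, using the same minio-go tags package and renaming the variable to avoid shadowing the package:

    userTags, _ := tags.ParseObjectTags(objInfo.UserTags)
    if userTags != nil && userTags.Count() > 0 {
        w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(userTags.Count())}
    }
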
@@ -35,7 +35,7 @@ import (
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
xxml "github.com/minio/xxml"
)

@@ -544,7 +544,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
}

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))

owner := &Owner{
@@ -573,7 +573,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -634,7 +634,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
owner := &Owner{
ID: globalMinioDefaultOwnerID,
@@ -654,7 +654,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -683,7 +683,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner *Owner
if fetchOwner {
@@ -707,7 +707,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -789,8 +789,8 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
}

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0)
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0, h)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
@@ -954,9 +954,9 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError

switch err.Code {
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
}

// Similar check to http.checkWriteHeaderCode

@@ -24,7 +24,7 @@ import (
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)

@@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Unlock()
}

// objectAPIHandler implements and provides http handlers for S3 API.
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}

File diff suppressed because one or more lines are too long
@@ -41,7 +41,7 @@ import (
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// Verify if request has JWT.
@@ -178,7 +178,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)

logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()

return cred, owner, ErrNone
}
@@ -368,7 +368,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalSite.Region
region := globalSite.Region()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
@@ -384,7 +384,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act

logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()

// region is valid only for CreateBucketAction.
var region string
@@ -684,7 +684,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalSite.Region
region := globalSite.Region()
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
@@ -745,7 +745,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
region := globalSite.Region
region := globalSite.Region()
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported

@@ -28,7 +28,7 @@ import (
"time"

"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

type nullReader struct{}
@@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {

// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}

@@ -25,7 +25,7 @@ import (
"time"

"github.com/minio/madmin-go/v3"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

// healTask represents what to heal along with options
@@ -133,19 +133,20 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *
}
}

if bgSeq != nil {
// We increment relevant counter based on the heal result for prometheus reporting.
if err != nil {
bgSeq.countFailed(res)
} else {
bgSeq.countHeals(res.Type, false)
}
}

if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
continue
}

// when respCh is not set caller is not waiting but we
// update the relevant metrics for them
if bgSeq != nil {
if err == nil {
bgSeq.countHealed(res.Type)
} else {
bgSeq.countFailed(res.Type)
}
}
case <-ctx.Done():
return
}

@@ -33,7 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (
@@ -141,14 +141,14 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
return h
}

func (h healingTracker) getLastUpdate() time.Time {
func (h *healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()

return h.LastUpdate
}

func (h healingTracker) getBucket() string {
func (h *healingTracker) getBucket() string {
h.mu.RLock()
defer h.mu.RUnlock()

@@ -162,7 +162,7 @@ func (h *healingTracker) setBucket(bucket string) {
h.Bucket = bucket
}

func (h healingTracker) getObject() string {
func (h *healingTracker) getObject() string {
h.mu.RLock()
defer h.mu.RUnlock()

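The healingTracker getters move from value receivers to pointer receivers. With a value receiver the method operates on a copy of the struct, which both copies the embedded mutex (go vet flags this via its copylocks check) and makes the RLock useless, since it locks the copy rather than the shared state. A self-contained reproduction:

    import (
        "sync"
        "time"
    )

    type tracker struct {
        mu   sync.RWMutex
        last time.Time
    }

    // Bad: the receiver copy means mu is copied too, so the lock guards nothing shared.
    func (t tracker) lastBad() time.Time { t.mu.RLock(); defer t.mu.RUnlock(); return t.last }

    // Good: all callers share the one mutex and the one last field.
    func (t *tracker) lastGood() time.Time { t.mu.RLock(); defer t.mu.RUnlock(); return t.last }
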
@@ -453,10 +453,6 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint

healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)

if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
}

if serverDebugLog {
tracker.printTo(os.Stdout)
fmt.Printf("\n")

@@ -33,9 +33,9 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)

@@ -432,7 +432,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
})
if err != nil {
stopFn(exp, err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts))
} else {
stopFn(exp, err)
success = true
@@ -464,25 +464,25 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
copy(toDelCopy, toDel)
var failed int
errs := r.Expire(ctx, api, vc, toDel)
// reslice toDel in preparation for next retry
// attempt
// reslice toDel in preparation for next retry attempt
toDel = toDel[:0]
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID,
err, attempts))
failed++
if attempts == retryAttempts { // all retry attempts failed, record failure
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false)
}
} else {
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
}
if attempts != retryAttempts {
// retry
toDel = append(toDel, toDelCopy[i])
}
} else {
stopFn(toDelCopy[i], nil)
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, true)
ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
}
}
}
@@ -537,7 +537,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
ctx, cancel := context.WithCancel(ctx)
defer cancel()

results := make(chan ObjectInfo, workerSize)
results := make(chan itemOrErr[ObjectInfo], workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
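Walk's results channel changes element type from ObjectInfo to itemOrErr[ObjectInfo], so listing errors can flow down the same channel as items instead of being silently dropped. The diff only shows call sites; judging from the result.Item and result.Err accesses throughout, the carrier is presumably a small generic pair along these lines:

    // itemOrErr carries either a walked item or the error that interrupted the
    // walk (assumed shape, reconstructed from the call sites in this diff).
    type itemOrErr[T any] struct {
        Item T
        Err  error
    }

Consumers then branch on Err first, as every loop below does, and mark the job failed rather than completing with objects missing.
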
@@ -584,11 +584,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
versionsCount int
toDel []expireObjInfo
)
failed := false
for result := range results {
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}

// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.IsLatest {
if result.Item.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
@@ -609,7 +616,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result, now) {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
@@ -619,18 +626,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
continue
}

prevObj = result
prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Name {
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
@@ -643,7 +650,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ObjectInfo: result.Item,
})
}
// Send any remaining objects downstream
@@ -658,8 +665,8 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
<-expireDoneCh // waits for the expire goroutine to complete
wk.Wait() // waits for all expire workers to retire

ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)

// Close the saverQuitCh - this also triggers saving in-memory state

@@ -48,10 +48,10 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)

@@ -447,7 +447,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
} else {
stopFn(oi, nil)
}
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
@@ -690,6 +690,7 @@ type batchJobInfo struct {
StartTime time.Time `json:"startTime" msg:"st"`
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
RetryAttempts int `json:"retryAttempts" msg:"ra"`
Attempts int `json:"attempts" msg:"at"`

Complete bool `json:"complete" msg:"cmp"`
Failed bool `json:"failed" msg:"fld"`
@@ -833,13 +834,15 @@ func (ri *batchJobInfo) clone() *batchJobInfo {
ObjectsFailed: ri.ObjectsFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
Attempts: ri.Attempts,
}
}

func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool, attempt int) {
if ri == nil {
return
}
ri.Attempts++
if success {
if dmarker {
ri.DeleteMarkers++
@@ -847,7 +850,19 @@ func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
ri.Objects++
ri.BytesTransferred += size
}
if attempt > 1 {
if dmarker {
ri.DeleteMarkersFailed--
} else {
ri.ObjectsFailed--
ri.BytesFailed -= size
}
}
} else {
if attempt > 1 {
// Only count first attempt
return
}
if dmarker {
ri.DeleteMarkersFailed++
} else {
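countItem now receives the attempt number so retries do not inflate the counters: a failure is recorded only on the first attempt, and a success on attempt N > 1 backs out the failure that attempt 1 recorded. Worked example for one object of size 100 that fails once and then succeeds on attempt 2: after attempt 1, ObjectsFailed is 1 and BytesFailed is 100; after attempt 2, Objects becomes 1 and BytesTransferred 100, while the attempt > 1 branch decrements ObjectsFailed back to 0 and subtracts the 100 failed bytes, leaving the totals as if the object had succeeded outright. (The decrement of BytesFailed is written as a subtraction here; the rendered diff showed an addition, which would double-count the bytes.)
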
@@ -921,7 +936,7 @@ func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectIn
}
}

func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) {
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool, attempt int) {
if ri == nil {
return
}
@@ -931,7 +946,7 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,

ri.Bucket = bucket
ri.Object = info.Name
ri.countItem(info.Size, info.DeleteMarker, success)
ri.countItem(info.Size, info.DeleteMarker, success, attempt)
}

func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInfo) {
@@ -945,7 +960,7 @@ func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInf
ri.Bucket = bucket
for i := range batch {
ri.Object = batch[i].Name
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true)
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true, 1)
}
}

@@ -1057,8 +1072,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)

var (
walkCh = make(chan ObjectInfo, 100)
slowCh = make(chan ObjectInfo, 100)
walkCh = make(chan itemOrErr[ObjectInfo], 100)
slowCh = make(chan itemOrErr[ObjectInfo], 100)
)

if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
@@ -1084,7 +1099,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
batchLogIf(ctx, err)
for _, b := range batch {
slowCh <- b
slowCh <- itemOrErr[ObjectInfo]{Item: b}
}
} else {
ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
@@ -1095,12 +1110,12 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
}
for obj := range walkCh {
if obj.DeleteMarker || !obj.VersionPurgeStatus.Empty() || obj.Size >= int64(smallerThan) {
if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) {
slowCh <- obj
continue
}

batch = append(batch, obj)
batch = append(batch, obj.Item)

if len(batch) < *r.Source.Snowball.Batch {
continue
@@ -1153,8 +1168,13 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
prevObj := ""

skipReplicate := false
for result := range slowCh {
result := result
for res := range slowCh {
if res.Err != nil {
ri.Failed = true
batchLogIf(ctx, res.Err)
continue
}
result := res.Item
if result.Name != prevObj {
prevObj = result.Name
skipReplicate = result.DeleteMarker && s3Type
@@ -1183,7 +1203,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
@@ -1231,6 +1251,7 @@ type batchReplicationJobError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize int64
}

func (e batchReplicationJobError) Error() string {
@@ -1247,9 +1268,18 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
return errInvalidArgument
}

if r.Source.Bucket == "" {
if r.Source.Endpoint != "" && r.Target.Endpoint != "" {
return errInvalidArgument
}

if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}

if r.Source.Bucket == "" || r.Target.Bucket == "" {
return errInvalidArgument
}

var isRemoteToLocal bool
localBkt := r.Source.Bucket
if r.Source.Endpoint != "" {
@@ -1274,9 +1304,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
if err := r.Source.Snowball.Validate(); err != nil {
return err
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}

if !r.Source.Creds.Empty() {
if err := r.Source.Creds.Validate(); err != nil {
@@ -1298,9 +1325,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() {
return errInvalidArgument
}
if r.Target.Bucket == "" {
return errInvalidArgument
}

if !r.Target.Creds.Empty() {
if err := r.Target.Creds.Validate(); err != nil {
@@ -1308,10 +1332,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}

if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}

if err := r.Target.Type.Validate(); err != nil {
return err
}
@@ -1456,7 +1476,7 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string

func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
// TODO: support custom storage class for remote replication
putOpts, err = putReplicationOpts(ctx, "", objInfo)
putOpts, err = putReplicationOpts(ctx, "", objInfo, 0)
if err != nil {
return putOpts, err
}
@@ -1484,7 +1504,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
jobType = string(madmin.BatchJobReplicate)
}

resultCh := make(chan ObjectInfo)
resultCh := make(chan itemOrErr[ObjectInfo])

ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -1496,8 +1516,12 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)

listResult := madmin.ListBatchJobsResult{}
for result := range resultCh {
if result.Err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
return
}
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, result.Name); err != nil {
if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
if !errors.Is(err, errNoSuchJob) {
batchLogIf(ctx, err)
}
@@ -1609,7 +1633,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
return
}

job.ID = fmt.Sprintf("%s:%d", shortuuid.New(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
job.ID = fmt.Sprintf("%s%s%d", shortuuid.New(), getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
job.User = user
job.Started = time.Now()

@@ -1702,7 +1726,7 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP
}

func (j *BatchJobPool) resume() {
results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
ctx, cancel := context.WithCancel(j.ctx)
defer cancel()
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil {
@@ -1710,12 +1734,16 @@ func (j *BatchJobPool) resume() {
return
}
for result := range results {
if result.Err != nil {
batchLogIf(j.ctx, result.Err)
continue
}
// ignore batch-replicate.bin and batch-rotate.bin entries
if strings.HasSuffix(result.Name, slashSeparator) {
if strings.HasSuffix(result.Item.Name, slashSeparator) {
continue
}
req := &BatchJobRequest{}
if err := req.load(ctx, j.objLayer, result.Name); err != nil {
if err := req.load(ctx, j.objLayer, result.Item.Name); err != nil {
batchLogIf(ctx, err)
continue
}

@@ -419,6 +419,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, err = dc.ReadBool()
if err != nil {
@@ -492,9 +498,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 16
// map header, size 17
// write "v"
err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
if err != nil {
return
}
@@ -553,6 +559,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "at"
err = en.Append(0xa2, 0x61, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Attempts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
// write "cmp"
err = en.Append(0xa3, 0x63, 0x6d, 0x70)
if err != nil {
@@ -659,9 +675,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 16
// map header, size 17
// string "v"
o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
o = msgp.AppendInt(o, z.Version)
// string "jid"
o = append(o, 0xa3, 0x6a, 0x69, 0x64)
@@ -678,6 +694,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
// string "ra"
o = append(o, 0xa2, 0x72, 0x61)
o = msgp.AppendInt(o, z.RetryAttempts)
// string "at"
o = append(o, 0xa2, 0x61, 0x74)
o = msgp.AppendInt(o, z.Attempts)
// string "cmp"
o = append(o, 0xa3, 0x63, 0x6d, 0x70)
o = msgp.AppendBool(o, z.Complete)
@@ -765,6 +784,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
@@ -839,6 +864,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *batchJobInfo) Msgsize() (s int) {
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
return
}

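The regenerated msgp code bumps the map header from 16 to 17 entries to carry the new "at" field, and the magic bytes follow the MessagePack spec: 0xde is a map16 header followed by a big-endian uint16 count, so 0xde 0x00 0x11 declares 17 key/value pairs, while 0xaN prefixes are fixstr headers of length N. Decoded by hand:

    // 0xde 0x00 0x11      -> map16 header, count = 0x0011 = 17 entries
    // 0xa1 0x76           -> fixstr len 1: "v"   (Version)
    // 0xa2 0x72 0x61      -> fixstr len 2: "ra"  (RetryAttempts)
    // 0xa2 0x61 0x74      -> fixstr len 2: "at"  (Attempts, the new field)
    // 0xa3 0x63 0x6d 0x70 -> fixstr len 3: "cmp" (Complete)

The Msgsize change matches: one extra "3 + msgp.IntSize" term, the 3 being the encoded length of the two-byte key "at" plus its fixstr header byte.
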
@@ -23,7 +23,7 @@ import (
"time"

"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"gopkg.in/yaml.v3"
)

@@ -33,8 +33,8 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/workers"
)

// keyrotate:
@@ -95,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
if e.Type == ssekms && spaces {
return crypto.ErrInvalidEncryptionKeyID
}

if e.Type == ssekms && GlobalKMS != nil {
ctx := kms.Context{}
if e.Context != "" {
@@ -113,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
return err
}
}
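Both KMS call sites in this series migrate GenerateKey from positional arguments to a request struct. Taken directly from the two hunks (keyName stands in for e.Key here and kmsKey in the bucket-encryption handler):

    // before
    _, err := GlobalKMS.GenerateKey(ctx, keyName, kmsContext)

    // after
    _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
        Name:           keyName,
        AssociatedData: kmsContext,
    })

A request struct keeps the interface stable as options grow: new fields can be added without changing the signature at every caller.
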
@@ -356,7 +357,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)

results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
Filter: selectObj,
@@ -365,9 +366,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
// Do not need to retry if we can't list objects on source.
return err
}

for result := range results {
result := result
failed := false
for res := range results {
if res.Err != nil {
failed = true
batchLogIf(ctx, res.Err)
break
}
result := res.Item
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
@@ -377,7 +383,6 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
go func() {
defer wk.Give()
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
@@ -387,8 +392,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
ri.trackCurrentBucketObject(r.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
@@ -407,8 +411,8 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
wk.Wait()

ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
@@ -475,8 +479,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}

if err := r.Flags.Retry.Validate(); err != nil {
return err
}
return nil
return r.Flags.Retry.Validate()
}

@@ -26,15 +26,17 @@ import (

xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/ringbuffer"
)

// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser
closeWithErr func(err error) error
closeWithErr func(err error)
h hash.Hash
shardSize int64
canClose *sync.WaitGroup
byteBuf []byte
}

func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
@@ -62,7 +64,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
}

func (b *streamingBitrotWriter) Close() error {
// Close the underlying writer.
// This will also flush the ring buffer if used.
err := b.iow.Close()

// Wait for all data to be written before returning else it causes race conditions.
// Race condition is because of io.PipeWriter implementation. i.e consider the following
// sequence of operations:
@@ -73,29 +78,34 @@ func (b *streamingBitrotWriter) Close() error {
if b.canClose != nil {
b.canClose.Wait()
}

// Recycle the buffer.
if b.byteBuf != nil {
globalBytePoolCap.Load().Put(b.byteBuf)
b.byteBuf = nil
}
return err
}

// newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
// The output is written to the supplied writer w.
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
// Similar to CloseWithError on pipes we always return nil.
return nil
}}
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
}

// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
r, w := io.Pipe()
h := algo.New()
buf := globalBytePoolCap.Load().Get()
rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)

bw := &streamingBitrotWriter{
iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
closeWithErr: w.CloseWithError,
iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
closeWithErr: rb.CloseWithError,
h: h,
shardSize: shardSize,
canClose: &sync.WaitGroup{},
byteBuf: buf,
}
bw.canClose.Add(1)
go func() {
@@ -106,7 +116,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
}()
return bw
}

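newStreamingBitrotWriter previously streamed through io.Pipe, where every Write blocks until the CreateFile goroutine reads it; the ring buffer decouples producer and consumer and reuses a pooled slice as backing storage. A sketch of the wiring, restricted to the ringbuffer calls visible in the hunk above (internal package; consume is a hypothetical stand-in for CreateFile, and the buffer serving as the io.Reader handed to the consumer is inferred from the call site):

    import (
        "io"

        "github.com/minio/minio/internal/ringbuffer"
    )

    // copyThroughRing pumps src to dst through a blocking ring buffer backed by buf.
    func copyThroughRing(dst io.Writer, src io.Reader, buf []byte) error {
        rb := ringbuffer.NewBuffer(buf).SetBlocking(true) // writes block only when the ring is full
        done := make(chan error, 1)
        go func() {
            _, err := io.Copy(dst, rb) // consumer drains the read side
            done <- err
        }()
        w := rb.WriteCloser() // producer side
        _, werr := io.Copy(w, src)
        w.Close() // flushes remaining data and signals EOF to the consumer
        if rerr := <-done; werr == nil {
            return rerr
        }
        return werr
    }

One write-side consequence shows up in the struct: the ring buffer's CloseWithError, unlike the pipe's, returns nothing useful, so closeWithErr shrinks to func(error).
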
@@ -30,7 +30,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

// To abstract a node over network.

@@ -30,7 +30,7 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

const (
@@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)

@@ -61,8 +61,8 @@ import (
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
)

const (
@@ -72,6 +72,8 @@ const (

xMinIOErrCodeHeader = "x-minio-error-code"
xMinIOErrDescHeader = "x-minio-error-desc"

postPolicyBucketTagging = "tagging"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -227,7 +229,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalSite.Region
region := globalSite.Region()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -1415,6 +1417,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}

if formValues.Get(postPolicyBucketTagging) != "" {
tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
tagsStr := tags.String()
opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
} else {
// avoid user set an invalid tag using `X-Amz-Tagging`
delete(opts.UserDefined, xhttp.AmzObjectTagging)
}

objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
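POST form uploads can now carry object tags: the handler reads a tagging form field holding the same XML document PutObjectTagging accepts, validates it with minio-go's tags package, and stores the canonical string under the X-Amz-Tagging metadata key, scrubbing any unvalidated value when the field is absent. A round-trip sketch with an illustrative payload:

    import (
        "strings"

        "github.com/minio/minio-go/v7/pkg/tags"
    )

    const payload = `<Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>`

    t, err := tags.ParseObjectXML(strings.NewReader(payload))
    if err != nil {
        // the handler responds with ErrMalformedPOSTRequest in this case
    }
    _ = t.String() // "env=prod", the canonical form stored under X-Amz-Tagging
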
@@ -1661,7 +1676,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
case rcfg.HasActiveRules("", true):
case rcfg != nil && rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

@@ -32,7 +32,7 @@ import (
|
||||
|
||||
// Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testRemoveBucketHandler, endpoints: []string{"RemoveBucket"}})
}

func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -78,7 +78,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a

// Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketLocationHandler, endpoints: []string{"GetBucketLocation"}})
}

func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -220,7 +220,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri

// Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestHeadBucketHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testHeadBucketHandler, endpoints: []string{"HeadBucket"}})
}

func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -322,7 +322,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

// Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
func TestListMultipartUploadsHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListMultipartUploadsHandler, endpoints: []string{"ListMultipartUploads"}})
}

// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
@@ -558,7 +558,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

// Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
func TestListBucketsHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListBucketsHandler, endpoints: []string{"ListBuckets"}})
}

// testListBucketsHandler - Tests validate listing of buckets.
@@ -649,7 +649,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteMultipleObjectsHandler, endpoints: []string{"DeleteMultipleObjects", "PutBucketPolicy"}})
}

func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,

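Note the single mechanical change repeated across all of these test wrappers: ExecObjectLayerAPITest now takes one ExecObjectLayerAPITestArgs struct instead of positional arguments. A minimal self-contained sketch of that options-struct pattern — the types below are illustrative stand-ins, not the real cmd package definitions:

package main

import "fmt"

// execArgs mirrors the options-struct style: call sites name the fields
// they set, and new fields can be added later without breaking callers.
type execArgs struct {
    objAPITest func(endpoints []string)
    endpoints  []string
}

func exec(a execArgs) { a.objAPITest(a.endpoints) }

func main() {
    exec(execArgs{
        objAPITest: func(eps []string) { fmt.Println("running handler test for", eps) },
        endpoints:  []string{"RemoveBucket"},
    })
}
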
@@ -28,7 +28,7 @@ import (
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

const (
@@ -64,7 +64,8 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}

    // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
+    rcfg, err := globalBucketObjectLockSys.Get(bucket)
+    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}

    // Validate the received bucket policy document
-    if err = bucketLifecycle.Validate(); err != nil {
+    if err = bucketLifecycle.Validate(rcfg); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

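PutBucketLifecycleHandler now fetches the bucket's object-lock retention configuration and passes it into Validate, so lifecycle rules can be checked against locking constraints at PUT time. A simplified sketch of that validation shape (types are stand-ins; the real rules live in internal/bucket/lifecycle, and the specific rule below is only an assumed example):

package main

import (
    "errors"
    "fmt"
)

type retention struct{ LockEnabled bool }

type lifecycleConfig struct{ hasExpiry bool }

// validate mirrors the Validate(rcfg) signature change: the lifecycle
// document is no longer validated in isolation but against the bucket's
// retention settings.
func (lc lifecycleConfig) validate(rcfg retention) error {
    if rcfg.LockEnabled && lc.hasExpiry {
        return errors.New("lifecycle expiry conflicts with object locking")
    }
    return nil
}

func main() {
    err := lifecycleConfig{hasExpiry: true}.validate(retention{LockEnabled: true})
    fmt.Println(err)
}
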
@@ -29,7 +29,7 @@ import (

// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
-    ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlersWrongCredentials, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
}

// Test for authentication
@@ -145,7 +145,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,

// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
-    ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlers, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
}

// Simple tests of bucket lifecycle: PUT, GET, DELETE.

@@ -40,7 +40,7 @@ import (
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/minio/internal/s3select"
-    xnet "github.com/minio/pkg/v2/net"
+    xnet "github.com/minio/pkg/v3/net"
    "github.com/zeebo/xxh3"
)

@@ -72,6 +72,7 @@ func NewLifecycleSys() *LifecycleSys {
}

func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string) madmin.TraceInfo {
+    sz, _ := oi.GetActualSize()
    return madmin.TraceInfo{
        TraceType: madmin.TraceILM,
        Time:      startTime,
@@ -79,6 +80,7 @@ func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event
        FuncName: event,
        Duration: duration,
        Path:     pathJoin(oi.Bucket, oi.Name),
+        Bytes:    sz,
        Error:    "",
        Message:  getSource(4),
        Custom:   map[string]string{"version-id": oi.VersionID},
@@ -277,6 +279,10 @@ func (es *expiryState) getWorkerCh(h uint64) chan<- expiryOp {
}

func (es *expiryState) ResizeWorkers(n int) {
+    if n == 0 {
+        n = 100
+    }
+
    // Lock to avoid multiple resizes to happen at the same time.
    es.mu.Lock()
    defer es.mu.Unlock()
@@ -538,6 +544,10 @@ func (t *transitionState) UpdateWorkers(n int) {
}

func (t *transitionState) updateWorkers(n int) {
+    if n == 0 {
+        n = 100
+    }
+
    for t.numWorkers < n {
        go t.worker(t.objAPI)
        t.numWorkers++
@@ -573,6 +583,10 @@ func enqueueTransitionImmediate(obj ObjectInfo, src lcEventSrc) {
    if lc, err := globalLifecycleSys.Get(obj.Bucket); err == nil {
        switch event := lc.Eval(obj.ToLifecycleOpts()); event.Action {
        case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
+            if obj.DeleteMarker || obj.IsDir {
+                // nothing to transition
+                return
+            }
            globalTransitionState.queueTransitionTask(obj, event, src)
        }
    }
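Both pools above treat n == 0 as "use the default of 100" and then converge the live worker count toward the target under a lock. The grow/shrink loop in isolation, as a runnable sketch (the trivial job/quit channels are assumptions; the real pools carry MinIO-specific state):

package main

import (
    "fmt"
    "sync"
)

type pool struct {
    mu   sync.Mutex
    n    int
    jobs chan func()
    quit chan struct{}
}

func (p *pool) worker() {
    for {
        select {
        case job := <-p.jobs:
            job()
        case <-p.quit:
            return
        }
    }
}

// resize mirrors ResizeWorkers/updateWorkers: lock, then spawn or
// retire workers until the count matches the requested size.
func (p *pool) resize(n int) {
    if n == 0 {
        n = 100 // same default as the diff above
    }
    p.mu.Lock()
    defer p.mu.Unlock()
    for p.n < n {
        go p.worker()
        p.n++
    }
    for p.n > n {
        p.quit <- struct{}{} // ask one worker to exit
        p.n--
    }
}

func main() {
    p := &pool{jobs: make(chan func()), quit: make(chan struct{})}
    p.resize(0)
    fmt.Println("workers:", p.n)
}
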
@@ -663,11 +677,12 @@ func genTransitionObjName(bucket string) (string, error) {
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, lae lcAuditEvent) (err error) {
+    timeILM := globalScannerMetrics.timeILM(lae.Action)
    defer func() {
        if err != nil {
            return
        }
-        globalScannerMetrics.timeILM(lae.Action)(1)
+        timeILM(1)
    }()

    opts := ObjectOptions{
@@ -721,12 +736,12 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er

// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
-    tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier)
+    tgtClient, err := globalTierConfigMgr.getDriver(ctx, oi.TransitionedObject.Tier)
    if err != nil {
-        return nil, fmt.Errorf("transition storage class not configured")
+        return nil, fmt.Errorf("transition storage class not configured: %w", err)
    }

-    fn, off, length, err := NewGetObjectReader(rs, oi, opts)
+    fn, off, length, err := NewGetObjectReader(rs, oi, opts, h)
    if err != nil {
        return nil, ErrorRespToObjectError(err, bucket, object)
    }

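Two things change in getTransitionedObjectReader: the tier driver lookup becomes context-aware, and the failure now wraps the underlying error with %w instead of discarding it. The wrapping detail matters because callers can keep using errors.Is/errors.As on the returned error; plain Go, independent of the MinIO types:

package main

import (
    "errors"
    "fmt"
)

var errTierMissing = errors.New("tier not found")

func main() {
    // Before: the cause is dropped and cannot be matched.
    before := fmt.Errorf("transition storage class not configured")
    // After: %w preserves the chain for errors.Is.
    after := fmt.Errorf("transition storage class not configured: %w", errTierMissing)

    fmt.Println(errors.Is(before, errTierMissing)) // false
    fmt.Println(errors.Is(after, errTierMissing))  // true
}
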
@@ -26,7 +26,7 @@ import (
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"

-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -124,7 +124,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
-    response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
+    response := generateListVersionsResponse(ctx, bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)

    // Write success response.
    writeSuccessResponseXML(w, encodeResponseList(response))
@@ -202,7 +202,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re

    if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(prefix, archivePattern) {
        // Initiate a list objects operation inside a zip file based in the input params
-        listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
+        listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, startAfter, r.Header)
    } else {
        // Initiate a list objects operation based on the input params.
        // On success would return back ListObjectsInfo object to be
@@ -219,7 +219,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
        return
    }

-    response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
+    response := generateListObjectsV2Response(ctx, bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
        delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
        maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, checkObjMeta)

@@ -231,7 +231,7 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) {
    if token == "" {
        return token, -1
    }
-    i := strings.Index(token, ":")
+    i := strings.Index(token, getKeySeparator())
    if i < 0 {
        return token, -1
    }
@@ -318,7 +318,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
        return
    }

-    response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
+    response := generateListObjectsV1Response(ctx, bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

    // Write success response.
    writeSuccessResponseXML(w, encodeResponseList(response))

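parseRequestToken stops hard-coding ":" and asks getKeySeparator() for the delimiter, but the splitting logic is unchanged. A self-contained rendering of that logic — the separator is passed in explicitly here, and the index parsing is an assumption, since only the first half of the function appears in the hunk:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseRequestToken splits "subToken<sep>nodeIndex"; -1 means no index.
func parseRequestToken(token, sep string) (string, int) {
    if token == "" {
        return token, -1
    }
    i := strings.Index(token, sep)
    if i < 0 {
        return token, -1
    }
    idx, err := strconv.Atoi(token[i+len(sep):])
    if err != nil {
        return token, -1
    }
    return token[:i], idx
}

func main() {
    sub, node := parseRequestToken("opaque-token:3", ":")
    fmt.Println(sub, node)
}
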
@@ -37,8 +37,8 @@ import (
    "github.com/minio/minio/internal/event"
    "github.com/minio/minio/internal/kms"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
-    "github.com/minio/pkg/v2/sync/errgroup"
+    "github.com/minio/pkg/v3/policy"
+    "github.com/minio/pkg/v3/sync/errgroup"
)

// BucketMetadataSys captures all bucket metadata for a given cluster.
@@ -46,6 +46,7 @@ type BucketMetadataSys struct {
    objAPI ObjectLayer

    sync.RWMutex
+    initialized bool
    metadataMap map[string]BucketMetadata
}

@@ -433,6 +434,8 @@ func (sys *BucketMetadataSys) GetConfigFromDisk(ctx context.Context, bucket stri
    return loadBucketMetadata(ctx, objAPI, bucket)
}

+var errBucketMetadataNotInitialized = errors.New("bucket metadata not initialized yet")
+
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
// reloaded will be true if metadata refreshed from disk
@@ -454,6 +457,10 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
    }
    meta, err = loadBucketMetadata(ctx, objAPI, bucket)
    if err != nil {
+        if !sys.Initialized() {
+            // bucket metadata not yet initialized
+            return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized
+        }
        return meta, reloaded, err
    }
    sys.Lock()
@@ -498,9 +505,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
    }

    errs := g.Wait()
-    for _, err := range errs {
+    for index, err := range errs {
        if err != nil {
-            internalLogIf(ctx, err, logger.WarningKind)
+            internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err),
+                "load-bucket-metadata-"+buckets[index].Name, logger.WarningKind)
        }
    }

@@ -583,6 +591,14 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
    }
}

+// Initialized indicates if bucket metadata sys is initialized atleast once.
+func (sys *BucketMetadataSys) Initialized() bool {
+    sys.RLock()
+    defer sys.RUnlock()
+
+    return sys.initialized
+}
+
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
    count := 100 // load 100 bucket metadata at a time.
@@ -596,6 +612,10 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
        buckets = buckets[count:]
    }

+    sys.Lock()
+    sys.initialized = true
+    sys.Unlock()
+
    if globalIsDistErasure {
        go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
    }

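The new initialized flag lets GetConfig tell "metadata not loaded yet" apart from a genuine load failure: init sets it under the write lock once the first full load completes, and Initialized reads it under the read lock. The synchronization pattern on its own:

package main

import (
    "fmt"
    "sync"
)

type metadataSys struct {
    sync.RWMutex
    initialized bool
}

// Initialized reports whether the first full load has completed.
func (s *metadataSys) Initialized() bool {
    s.RLock()
    defer s.RUnlock()
    return s.initialized
}

func (s *metadataSys) markInitialized() {
    s.Lock()
    s.initialized = true
    s.Unlock()
}

func main() {
    var s metadataSys
    fmt.Println(s.Initialized()) // false: loads still pending
    s.markInitialized()
    fmt.Println(s.Initialized()) // true
}
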
@@ -41,7 +41,7 @@ import (
    "github.com/minio/minio/internal/fips"
    "github.com/minio/minio/internal/kms"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
    "github.com/minio/sio"
)

@@ -490,7 +490,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
}

    metadata := make(map[string]string)
-    key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext)
+    key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
    if err != nil {
        return
    }
@@ -519,7 +519,11 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
    if err != nil {
        return nil, err
    }
-    extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext)
+    extKey, err := GlobalKMS.Decrypt(context.TODO(), &kms.DecryptRequest{
+        Name:           keyID,
+        Ciphertext:     kmsKey,
+        AssociatedData: kmsContext,
+    })
    if err != nil {
        return nil, err
    }

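The KMS interface moves from positional arguments to request structs, so optional inputs such as associated data are named at the call site. A sketch of that calling convention with local stand-in types (not the internal/kms package itself):

package main

import "fmt"

type GenerateKeyRequest struct {
    Name           string // empty means the configured default key
    AssociatedData map[string]string
}

type DecryptRequest struct {
    Name           string
    Ciphertext     []byte
    AssociatedData map[string]string
}

func generateKey(req *GenerateKeyRequest) string {
    return fmt.Sprintf("DEK under key %q, %d context entries", req.Name, len(req.AssociatedData))
}

func main() {
    // Optional fields are named at the call site instead of being
    // threaded through an ever-widening positional signature.
    fmt.Println(generateKey(&GenerateKeyRequest{
        AssociatedData: map[string]string{"bucket": "photos"},
    }))
}
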
@@ -26,7 +26,7 @@ import (
    "github.com/minio/minio/internal/event"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

const (
@@ -66,8 +66,9 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
-    config.SetRegion(globalSite.Region)
-    if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
+    region := globalSite.Region()
+    config.SetRegion(region)
+    if err = config.Validate(region, globalEventNotifier.targetList); err != nil {
        arnErr, ok := err.(*event.ErrARNNotFound)
        if ok {
            for i, queue := range config.QueueList {
@@ -134,7 +135,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
        return
    }

-    config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList)
+    config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region(), globalEventNotifier.targetList)
    if err != nil {
        apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
        if event.IsEventError(err) {

@@ -28,7 +28,7 @@ import (
    "github.com/minio/minio/internal/bucket/replication"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

// BucketObjectLockSys - map of bucket and retention configuration.
@@ -44,7 +44,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
        if errors.Is(err, errInvalidArgument) {
            return r, err
        }
-        logger.CriticalIf(context.Background(), err)
        return r, err
    }
    return config.ToRetention(), nil

@@ -27,7 +27,7 @@ import (
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

const (

@@ -29,8 +29,8 @@ import (
    "testing"

    "github.com/minio/minio/internal/auth"
-    "github.com/minio/pkg/v2/policy"
-    "github.com/minio/pkg/v2/policy/condition"
+    "github.com/minio/pkg/v3/policy"
+    "github.com/minio/pkg/v3/policy/condition"
)

func getAnonReadOnlyBucketPolicy(bucketName string) *policy.BucketPolicy {
@@ -107,7 +107,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.BucketPolic

// Wrapper for calling Create Bucket and ensure we get one and only one success.
func TestCreateBucket(t *testing.T) {
-    ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucket"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testCreateBucket, endpoints: []string{"MakeBucket"}})
}

// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
@@ -154,7 +154,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute

// Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestPutBucketPolicyHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testPutBucketPolicyHandler, endpoints: []string{"PutBucketPolicy"}})
}

// testPutBucketPolicyHandler - Test for Bucket policy end point.
@@ -373,7 +373,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketPolicyHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "GetBucketPolicy"}})
}

// testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
@@ -577,7 +577,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

// Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestDeleteBucketPolicyHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testDeleteBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "DeleteBucketPolicy"}})
}

// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.

@@ -32,7 +32,7 @@ import (
    "github.com/minio/minio/internal/handlers"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

// PolicySys - policy subsystem.

@@ -49,8 +49,8 @@ var bucketStorageCache = cachevalue.New[DataUsageInfo]()
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
    bucketStorageCache.InitOnce(10*time.Second,
        cachevalue.Opts{ReturnLastGood: true, NoWait: true},
-        func() (DataUsageInfo, error) {
-            ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
+        func(ctx context.Context) (DataUsageInfo, error) {
+            ctx, done := context.WithTimeout(ctx, 2*time.Second)
            defer done()

            return loadDataUsageFromBackend(ctx, objAPI)
@@ -59,8 +59,8 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
}

// GetBucketUsageInfo return bucket usage info for a given bucket
-func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
-    dui, err := bucketStorageCache.Get()
+func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) (BucketUsageInfo, error) {
+    dui, err := bucketStorageCache.GetWithCtx(ctx)
    timedout := OperationTimedOut{}
    if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
        if len(dui.BucketsUsage) > 0 {
@@ -118,7 +118,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string,
        return BucketQuotaExceeded{Bucket: bucket}
    }

-    bui, err := sys.GetBucketUsageInfo(bucket)
+    bui, err := sys.GetBucketUsageInfo(ctx, bucket)
    if err != nil {
        return err
    }

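The quota path now threads the caller's context through the cached loader (GetWithCtx plus a func(ctx) loader), so the 2-second backend budget is derived from the request's own deadline rather than context.Background(). The idiom in isolation, with a stand-in loader:

package main

import (
    "context"
    "fmt"
    "time"
)

// loadUsage stands in for loadDataUsageFromBackend.
func loadUsage(ctx context.Context) (string, error) {
    select {
    case <-time.After(10 * time.Millisecond):
        return "usage-data", nil
    case <-ctx.Done():
        return "", ctx.Err()
    }
}

func main() {
    parent := context.Background() // imagine this is the request context
    ctx, cancel := context.WithTimeout(parent, 2*time.Second)
    defer cancel()

    v, err := loadUsage(ctx) // cancelled early if the caller gives up
    fmt.Println(v, err)
}
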
@@ -34,7 +34,7 @@ import (
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.

@@ -534,7 +534,7 @@ func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) Replicate
    rstate.ReplicateDecisionStr = dsc.String()
    asz, _ := oi.GetActualSize()

-    return ReplicateObjectInfo{
+    r := ReplicateObjectInfo{
        Name:       oi.Name,
        Size:       oi.Size,
        ActualSize: asz,
@@ -558,6 +558,10 @@ func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) Replicate
        SSEC:     crypto.SSEC.IsEncrypted(oi.UserDefined),
        UserTags: oi.UserTags,
    }
+    if r.SSEC {
+        r.Checksum = oi.Checksum
+    }
+    return r
}

// ReplicationState - returns replication state using other internal replication metadata in ObjectInfo

@@ -19,6 +19,7 @@ package cmd

import (
    "context"
+    "encoding/base64"
    "encoding/binary"
    "errors"
    "fmt"
@@ -74,20 +75,18 @@ const (
    ObjectLockRetentionTimestamp = "objectlock-retention-timestamp"
    // ObjectLockLegalHoldTimestamp - the last time a legal hold metadata modification happened on this cluster for this object version
    ObjectLockLegalHoldTimestamp = "objectlock-legalhold-timestamp"
    // ReplicationWorkerMultiplier is suggested worker multiplier if traffic exceeds replication worker capacity
    ReplicationWorkerMultiplier = 1.5

+    // ReplicationSsecChecksumHeader - the encrypted checksum of the SSE-C encrypted object.
+    ReplicationSsecChecksumHeader = "X-Minio-Replication-Ssec-Crc"
)

// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
    rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
-    if err != nil {
-        if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) {
-            return rCfg, err
-        }
-        logger.CriticalIf(ctx, err)
+    if err != nil && !errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) {
+        return rCfg, err
    }
-    return rCfg, err
+    return rCfg, nil
}

// validateReplicationDestination returns error if replication destination bucket missing or not configured
@@ -261,10 +260,16 @@ func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplica
    if mopts.replicationRequest { // incoming replication request on target cluster
        return
    }

    cfg, err := getReplicationConfig(ctx, bucket)
+    if err != nil {
+        replLogOnceIf(ctx, err, bucket)
+        return
+    }
    if cfg == nil {
        return
    }

    opts := replication.ObjectOpts{
        Name: object,
        SSEC: crypto.SSEC.IsEncrypted(mopts.meta),
@@ -312,6 +317,7 @@ var standardHeaders = []string{
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
    c, err := getReplicationConfig(ctx, bucket)
    if err != nil || c == nil {
+        replLogOnceIf(ctx, err, bucket)
        return false
    }
    for _, obj := range objects {
@@ -331,6 +337,7 @@ func isStandardHeader(matchHeaderKey string) bool {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, delOpts ObjectOptions, gerr error) (dsc ReplicateDecision) {
    rcfg, err := getReplicationConfig(ctx, bucket)
    if err != nil || rcfg == nil {
+        replLogOnceIf(ctx, err, bucket)
        return
    }
    // If incoming request is a replication request, it does not need to be re-replicated.
@@ -758,12 +765,34 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
    return "", false
}

-func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
+func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) map[string]string {
+    meta := make(map[string]string)
+    cs := oi.decryptChecksums(partNum, h)
+    for k, v := range cs {
+        cksum := hash.NewChecksumString(k, v)
+        if cksum == nil {
+            continue
+        }
+        if cksum.Valid() {
+            meta[cksum.Type.Key()] = v
+        }
+    }
+    return meta
+}
+
+func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, partNum int) (putOpts minio.PutObjectOptions, err error) {
    meta := make(map[string]string)
+    isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined)

    for k, v := range objInfo.UserDefined {
        // In case of SSE-C objects copy the allowed internal headers as well
-        if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
+        if !isSSEC || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
            if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
+                if strings.EqualFold(k, ReservedMetadataPrefixLower+"crc") {
+                    for k, v := range getCRCMeta(objInfo, partNum, nil) {
+                        meta[k] = v
+                    }
+                }
                continue
            }
            if isStandardHeader(k) {
@@ -777,6 +806,17 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
        }
    }

+    if len(objInfo.Checksum) > 0 {
+        // Add encrypted CRC to metadata for SSE-C objects.
+        if isSSEC {
+            meta[ReplicationSsecChecksumHeader] = base64.StdEncoding.EncodeToString(objInfo.Checksum)
+        } else {
+            for k, v := range getCRCMeta(objInfo, 0, nil) {
+                meta[k] = v
+            }
+        }
+    }
+
    if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
        sc = objInfo.StorageClass
    }
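putReplicationOpts now resolves checksums per part through getCRCMeta, and SSE-C objects ship their encrypted checksum opaquely in a dedicated header instead. A reduced model of that branch — the header constant is copied from the diff, everything else is a stand-in:

package main

import (
    "encoding/base64"
    "fmt"
)

const replicationSsecChecksumHeader = "X-Minio-Replication-Ssec-Crc"

// buildChecksumMeta mirrors the split above: SSE-C objects only have an
// encrypted checksum blob, forwarded verbatim; plaintext objects expand
// per-algorithm checksum keys into metadata.
func buildChecksumMeta(isSSEC bool, checksum []byte, crcMeta map[string]string) map[string]string {
    meta := make(map[string]string)
    if len(checksum) == 0 {
        return meta
    }
    if isSSEC {
        meta[replicationSsecChecksumHeader] = base64.StdEncoding.EncodeToString(checksum)
        return meta
    }
    for k, v := range crcMeta {
        meta[k] = v
    }
    return meta
}

func main() {
    fmt.Println(buildChecksumMeta(true, []byte{0xde, 0xad}, nil))
    fmt.Println(buildChecksumMeta(false, []byte{1}, map[string]string{"x-amz-checksum-crc32": "AAAAAA=="}))
}
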
@@ -993,7 +1033,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
    object := ri.Name

    cfg, err := getReplicationConfig(ctx, bucket)
-    if err != nil {
+    if err != nil || cfg == nil {
        replLogOnceIf(ctx, err, "get-replication-config-"+bucket)
        sendEvent(eventArgs{
            EventName: event.ObjectReplicationNotTracked,
@@ -1196,17 +1236,23 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
    // make sure we have the latest metadata for metrics calculation
    rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)

-    size, err := objInfo.GetActualSize()
-    if err != nil {
-        replLogIf(ctx, err)
-        sendEvent(eventArgs{
-            EventName:  event.ObjectReplicationNotTracked,
-            BucketName: bucket,
-            Object:     objInfo,
-            UserAgent:  "Internal: [Replication]",
-            Host:       globalLocalNodeName,
-        })
-        return
+    // Set the encrypted size for SSE-C objects
+    var size int64
+    if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
+        size = objInfo.Size
+    } else {
+        size, err = objInfo.GetActualSize()
+        if err != nil {
+            replLogIf(ctx, err)
+            sendEvent(eventArgs{
+                EventName:  event.ObjectReplicationNotTracked,
+                BucketName: bucket,
+                Object:     objInfo,
+                UserAgent:  "Internal: [Replication]",
+                Host:       globalLocalNodeName,
+            })
+            return
+        }
    }

    if tgt.Bucket == "" {
@@ -1234,7 +1280,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
    // use core client to avoid doing multipart on PUT
    c := &minio.Core{Client: tgt.Client}

-    putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
+    putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
    if err != nil {
        replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
        sendEvent(eventArgs{
@@ -1267,23 +1313,19 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
    }
    r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
    if objInfo.isMultipart() {
-        if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
-            r, objInfo, putOpts); rinfo.Err != nil {
-            if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
-                rinfo.ReplicationStatus = replication.Failed
-                replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
-            }
-        }
+        rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
    } else {
-        if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
-            if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
-                rinfo.ReplicationStatus = replication.Failed
-                replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
-            }
-        }
+        _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
    }
-    if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-        globalBucketTargetSys.markOffline(tgt.EndpointURL())
+    if rinfo.Err != nil {
+        if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
+            rinfo.ReplicationStatus = replication.Failed
+            replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): to (target: %s): %w",
+                bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
+        }
+        if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
+            globalBucketTargetSys.markOffline(tgt.EndpointURL())
+        }
    }
    return
}
@@ -1376,7 +1418,8 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
    }

    // Set the encrypted size for SSE-C objects
-    if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
+    isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined)
+    if isSSEC {
        size = objInfo.Size
    }

@@ -1436,6 +1479,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
            return
        }
    } else {
+        // SSEC objects will refuse HeadObject without the decryption key.
+        // Ignore the error, since we know the object exists and versioning prevents overwriting existing versions.
+        if isSSEC && strings.Contains(cerr.Error(), errorCodes[ErrSSEEncryptedObject].Description) {
+            rinfo.ReplicationStatus = replication.Completed
+            rinfo.ReplicationAction = replicateNone
+            goto applyAction
+        }
        // if target returns error other than NoSuchKey, defer replication attempt
        if minio.IsNetworkOrHostDown(cerr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
            globalBucketTargetSys.markOffline(tgt.EndpointURL())
@@ -1463,6 +1513,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
            return
        }
    }
+applyAction:
    rinfo.ReplicationStatus = replication.Completed
    rinfo.Size = size
    rinfo.ReplicationAction = rAction
@@ -1505,7 +1556,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
    }
    } else {
        var putOpts minio.PutObjectOptions
-        putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo)
+        putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
        if err != nil {
            replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
            sendEvent(eventArgs{
@@ -1537,27 +1588,19 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
    }
    r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
    if objInfo.isMultipart() {
-        if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
-            r, objInfo, putOpts); rinfo.Err != nil {
-            if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
-                rinfo.ReplicationStatus = replication.Failed
-                replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
-            } else {
-                rinfo.ReplicationStatus = replication.Completed
-            }
-        }
+        rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
    } else {
-        if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
-            if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
-                rinfo.ReplicationStatus = replication.Failed
-                replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
-            } else {
-                rinfo.ReplicationStatus = replication.Completed
-            }
-        }
+        _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
    }
-    if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-        globalBucketTargetSys.markOffline(tgt.EndpointURL())
+    if rinfo.Err != nil {
+        if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
+            rinfo.ReplicationStatus = replication.Failed
+            replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w",
+                bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
+        }
+        if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
+            globalBucketTargetSys.markOffline(tgt.EndpointURL())
+        }
    }
    }
    return
@@ -1577,7 +1620,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
        break
    }
    if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
-        return err
+        return nil
    }
    attempts++
    time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
@@ -1604,13 +1647,14 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
    }()

    var (
-        hr    *hash.Reader
-        pInfo minio.ObjectPart
+        hr     *hash.Reader
+        pInfo  minio.ObjectPart
+        isSSEC = crypto.SSEC.IsEncrypted(objInfo.UserDefined)
    )

    var objectSize int64
    for _, partInfo := range objInfo.Parts {
-        if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
+        if isSSEC {
            hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.Size), partInfo.Size, "", "", partInfo.ActualSize)
        } else {
            hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
@@ -1621,12 +1665,18 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob

        cHeader := http.Header{}
        cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true")
+        if !isSSEC {
+            crc := getCRCMeta(objInfo, partInfo.Number, nil) // No SSE-C keys here.
+            for k, v := range crc {
+                cHeader.Add(k, v)
+            }
+        }
        popts := minio.PutObjectPartOptions{
            SSE:          opts.ServerSideEncryption,
            CustomHeader: cHeader,
        }

-        if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
+        if isSSEC {
            objectSize += partInfo.Size
            pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.Size, popts)
        } else {
@@ -1636,22 +1686,33 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
        if err != nil {
            return err
        }
-        if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) && pInfo.Size != partInfo.ActualSize {
+        if !isSSEC && pInfo.Size != partInfo.ActualSize {
            return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
        }
        uploadedParts = append(uploadedParts, minio.CompletePart{
-            PartNumber: pInfo.PartNumber,
-            ETag:       pInfo.ETag,
+            PartNumber:     pInfo.PartNumber,
+            ETag:           pInfo.ETag,
+            ChecksumCRC32:  pInfo.ChecksumCRC32,
+            ChecksumCRC32C: pInfo.ChecksumCRC32C,
+            ChecksumSHA1:   pInfo.ChecksumSHA1,
+            ChecksumSHA256: pInfo.ChecksumSHA256,
        })
    }
+    userMeta := map[string]string{
+        validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"],
+    }
+    if isSSEC && objInfo.UserDefined[ReplicationSsecChecksumHeader] != "" {
+        userMeta[ReplicationSsecChecksumHeader] = objInfo.UserDefined[ReplicationSsecChecksumHeader]
+    }

    // really big value but its okay on heavily loaded systems. This is just tail end timeout.
    cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute)
    defer ccancel()
    _, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
-        UserMetadata: map[string]string{validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"]},
+        UserMetadata: userMeta,
        Internal: minio.AdvancedPutOptions{
            SourceMTime: objInfo.ModTime,
            SourceETag:  objInfo.ETag,
            // always set this to distinguish between `mc mirror` replication and serverside
            ReplicationRequest: true,
        },
@@ -2185,12 +2246,12 @@ type proxyResult struct {

// get Reader from replication target if active-active replication is in place and
// this node returns a 404
-func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, _ http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
+func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
    tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
    if !proxy.Proxy {
        return nil, proxy, nil
    }
-    fn, _, _, err := NewGetObjectReader(nil, oi, opts)
+    fn, _, _, err := NewGetObjectReader(nil, oi, opts, h)
    if err != nil {
        return nil, proxy, err
    }
@@ -2245,6 +2306,8 @@ func getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOpti
    }
    cfg, err := getReplicationConfig(ctx, bucket)
    if err != nil || cfg == nil {
+        replLogOnceIf(ctx, err, bucket)
+
        return &madmin.BucketTargets{}
    }
    topts := replication.ObjectOpts{Name: object}
@@ -2373,7 +2436,9 @@ func scheduleReplication(ctx context.Context, oi ObjectInfo, o ObjectLayer, dsc
        SSEC:     crypto.SSEC.IsEncrypted(oi.UserDefined),
        UserTags: oi.UserTags,
    }

+    if ri.SSEC {
+        ri.Checksum = oi.Checksum
+    }
    if dsc.Synchronous() {
        replicateObject(ctx, ri, o)
    } else {
@@ -2739,7 +2804,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
        s.workerCh <- struct{}{}
    }()
    // Allocate new results channel to receive ObjectInfo.
-    objInfoCh := make(chan ObjectInfo)
+    objInfoCh := make(chan itemOrErr[ObjectInfo])
    cfg, err := getReplicationConfig(ctx, opts.bucket)
    if err != nil {
        replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
@@ -2853,17 +2918,19 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
            ReplicationProxyRequest: "false",
        },
    })
+    sz := roi.Size
    if err != nil {
        if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, opts.bucket, roi.Name)) {
            st.ReplicatedCount++
        } else {
            st.FailedCount++
        }
+        sz = 0
    } else {
        st.ReplicatedCount++
        st.ReplicatedSize += roi.Size
    }
-    traceFn(err)
+    traceFn(sz, err)
    select {
    case <-ctx.Done():
        return
@@ -2874,7 +2941,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
        }
    }(ctx, i)
}
-for obj := range objInfoCh {
+for res := range objInfoCh {
+    if res.Err != nil {
+        resyncStatus = ResyncFailed
+        replLogIf(ctx, res.Err)
+        return
+    }
    select {
    case <-s.resyncCancelCh:
        resyncStatus = ResyncCanceled
@@ -2883,11 +2955,11 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
        return
    default:
    }
-    if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name {
+    if heal && lastCheckpoint != "" && lastCheckpoint != res.Item.Name {
        continue
    }
    lastCheckpoint = ""
-    roi := getHealReplicateObjectInfo(obj, rcfg)
+    roi := getHealReplicateObjectInfo(res.Item, rcfg)
    if !roi.ExistingObjResync.mustResync() {
        continue
    }
@@ -2973,17 +3045,17 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt
    return nil
}

-func (s *replicationResyncer) trace(resyncID string, path string) func(err error) {
+func (s *replicationResyncer) trace(resyncID string, path string) func(sz int64, err error) {
    startTime := time.Now()
-    return func(err error) {
+    return func(sz int64, err error) {
        duration := time.Since(startTime)
        if globalTrace.NumSubscribers(madmin.TraceReplicationResync) > 0 {
-            globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err))
+            globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err, sz))
        }
    }
}

-func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error) madmin.TraceInfo {
+func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error, sz int64) madmin.TraceInfo {
    var errStr string
    if err != nil {
        errStr = err.Error()
@@ -2997,6 +3069,7 @@ func replicationResyncTrace(resyncID string, startTime time.Time, duration time.
        Duration: duration,
        Path:     path,
        Error:    errStr,
+        Bytes:    sz,
    }
}

@@ -3138,7 +3211,7 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR
func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) {
    cfg, err := getReplicationConfig(ctx, bucket)
    if err != nil {
-        replLogIf(ctx, err)
+        replLogOnceIf(ctx, err, bucket)
        return nil, err
    }
    tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
@@ -3147,7 +3220,7 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
        return nil, err
    }

-    objInfoCh := make(chan ObjectInfo, 10)
+    objInfoCh := make(chan itemOrErr[ObjectInfo], 10)
    if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil {
        replLogIf(ctx, err)
        return nil, err
@@ -3159,11 +3232,17 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
    diffCh := make(chan madmin.DiffInfo, 4000)
    go func() {
        defer xioutil.SafeClose(diffCh)
-        for obj := range objInfoCh {
+        for res := range objInfoCh {
+            if res.Err != nil {
+                diffCh <- madmin.DiffInfo{Err: res.Err}
+                return
+            }
            if contextCanceled(ctx) {
                // Just consume input...
                continue
            }
+            obj := res.Item

            // Ignore object prefixes which are excluded
            // from versioning via the MinIO bucket versioning extension.
            if globalBucketVersioningSys.PrefixSuspended(bucket, obj.Name) {
@@ -3231,7 +3310,11 @@ func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, ret
    if oi.ModTime.IsZero() {
        return
    }
-    rcfg, _ := getReplicationConfig(ctx, bucket)
+    rcfg, err := getReplicationConfig(ctx, bucket)
+    if err != nil {
+        replLogOnceIf(ctx, err, bucket)
+        return
+    }
    tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
    queueReplicationHeal(ctx, bucket, oi, replicationConfig{
        Config: rcfg,

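The resync and diff walkers now consume itemOrErr[ObjectInfo] rather than bare ObjectInfo, so a listing failure arrives in-band instead of silently closing the channel. The consumption pattern, with a local generic type standing in for the internal one:

package main

import (
    "errors"
    "fmt"
)

// itemOrErr mirrors the new Walk payload: each message carries either a
// value or a terminal error.
type itemOrErr[T any] struct {
    Item T
    Err  error
}

func main() {
    ch := make(chan itemOrErr[string], 3)
    ch <- itemOrErr[string]{Item: "bucket/obj-1"}
    ch <- itemOrErr[string]{Err: errors.New("drive offline")}
    close(ch)

    for res := range ch {
        if res.Err != nil {
            fmt.Println("walk failed:", res.Err) // fail the whole scan
            return
        }
        fmt.Println("processing", res.Item)
    }
}
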
@@ -428,7 +428,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
    if arn.Type == madmin.ReplicationService {
        // reject removal of remote target if replication configuration is present
        rcfg, err := getReplicationConfig(ctx, bucket)
-        if err == nil {
+        if err == nil && rcfg != nil {
            for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{OpType: replication.AllReplicationType}) {
                if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) {
                    sys.RLock()

@@ -28,7 +28,7 @@ import (
    "github.com/minio/minio/internal/bucket/versioning"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

const (
@@ -82,7 +82,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
        }, r.URL)
        return
    }
-    if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
+    if rc, _ := getReplicationConfig(ctx, bucket); rc != nil && v.Suspended() {
        writeErrorResponse(ctx, w, APIError{
            Code: "InvalidBucketState",
            Description: "A replication configuration is present on this bucket, bucket wide versioning cannot be suspended.",

@@ -65,5 +65,5 @@ var (
    MinioBannerName = "MinIO Object Storage Server"

    // MinioLicense - MinIO server license.
-    MinioLicense = "GNU AGPLv3 <https://www.gnu.org/licenses/agpl-3.0.html>"
+    MinioLicense = "GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html"
)

@@ -80,6 +80,9 @@ func runCallhome(ctx context.Context, objAPI ObjectLayer) bool {
    ctx = lkctx.Context()
    defer locker.Unlock(lkctx)

+    // Perform callhome once and then keep running it at regular intervals.
+    performCallhome(ctx)
+
    callhomeTimer := time.NewTimer(globalCallhomeConfig.FrequencyDur())
    defer callhomeTimer.Stop()

@@ -141,11 +144,14 @@ func performCallhome(ctx context.Context) {
    select {
    case hi, hasMore := <-healthInfoCh:
        if !hasMore {
+            auditOptions := AuditLogOptions{Event: "callhome:diagnostics"}
            // Received all data. Send to SUBNET and return
            err := sendHealthInfo(ctx, healthInfo)
            if err != nil {
                internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
+                auditOptions.Error = err.Error()
            }
+            auditLogInternal(ctx, auditOptions)
            return
        }
        healthInfo = hi

@@ -21,10 +21,8 @@ import (
    "bufio"
    "bytes"
    "context"
-    "crypto/tls"
    "crypto/x509"
    "encoding/gob"
-    "encoding/pem"
    "errors"
    "fmt"
    "net"
@@ -49,7 +47,6 @@ import (
    "github.com/minio/console/api/operations"
    consoleoauth2 "github.com/minio/console/pkg/auth/idp/oauth2"
    consoleCerts "github.com/minio/console/pkg/certs"
-    "github.com/minio/kms-go/kes"
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/set"
@@ -58,11 +55,11 @@ import (
    "github.com/minio/minio/internal/config"
    "github.com/minio/minio/internal/kms"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/certs"
-    "github.com/minio/pkg/v2/console"
-    "github.com/minio/pkg/v2/ellipses"
-    "github.com/minio/pkg/v2/env"
-    xnet "github.com/minio/pkg/v2/net"
+    "github.com/minio/pkg/v3/certs"
+    "github.com/minio/pkg/v3/console"
+    "github.com/minio/pkg/v3/env"
+    xnet "github.com/minio/pkg/v3/net"
+    "golang.org/x/term"
)

// serverDebugLog will enable debug printing
@@ -73,6 +70,13 @@ var (
)

func init() {
+    if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) {
+        color.TurnOff()
+    }
+    if env.Get("NO_COLOR", "") != "" || env.Get("TERM", "") == "dumb" {
+        color.TurnOff()
+    }
+
    if runtime.GOOS == "windows" {
        if mousetrap.StartedByExplorer() {
            fmt.Printf("Don't double-click %s\n", os.Args[0])
@@ -131,6 +135,9 @@ func minioConfigToConsoleFeatures() {
            os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
        }
    }
+    if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" {
+        os.Setenv("CONSOLE_BROWSER_REDIRECT_URL", value)
+    }
    // pass the console subpath configuration
    if globalBrowserRedirectURL != nil {
        subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
@@ -169,7 +176,10 @@ func minioConfigToConsoleFeatures() {
        os.Setenv("CONSOLE_STS_DURATION", valueSession)
    }

-    os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
+    os.Setenv("CONSOLE_MINIO_SITE_NAME", globalSite.Name())
+    os.Setenv("CONSOLE_MINIO_SITE_REGION", globalSite.Region())
+    os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region())

    os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))

    // This section sets Browser (console) stored config
@@ -392,20 +402,38 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
        ctxt.certsDirSet = true
    }

+    memAvailable := availableMemory()
+    if ctx.IsSet("memlimit") || ctx.GlobalIsSet("memlimit") {
+        memlimit := ctx.String("memlimit")
+        if memlimit == "" {
+            memlimit = ctx.GlobalString("memlimit")
+        }
+        mlimit, err := humanize.ParseBytes(memlimit)
+        if err != nil {
+            return err
+        }
+        if mlimit > memAvailable {
+            logger.Info("WARNING: maximum memory available (%s) smaller than specified --memlimit=%s, ignoring --memlimit value",
+                humanize.IBytes(memAvailable), memlimit)
+        }
+        ctxt.MemLimit = mlimit
+    } else {
+        ctxt.MemLimit = memAvailable
+    }
+
+    if memAvailable < ctxt.MemLimit {
+        ctxt.MemLimit = memAvailable
+    }
+
    ctxt.FTP = ctx.StringSlice("ftp")
    ctxt.SFTP = ctx.StringSlice("sftp")

    ctxt.Interface = ctx.String("interface")
+    ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
    ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
    ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
    ctxt.ConnClientReadDeadline = ctx.Duration("conn-client-read-deadline")
    ctxt.ConnClientWriteDeadline = ctx.Duration("conn-client-write-deadline")

+    ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
    ctxt.SendBufSize = ctx.Int("send-buf-size")
    ctxt.RecvBufSize = ctx.Int("recv-buf-size")
    ctxt.IdleTimeout = ctx.Duration("idle-timeout")
    ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout")
    ctxt.MaxIdleConnsPerHost = ctx.Int("max-idle-conns-per-host")
-    ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
-    ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")

    if conf := ctx.String("config"); len(conf) > 0 {
        err = mergeServerCtxtFromConfigFile(conf, ctxt)
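The new --memlimit handling parses a human-readable size and refuses to exceed the detected available memory. The parsing half is reproducible with github.com/dustin/go-humanize, which appears to be the humanize package the code imports; the sketch below compresses the diff's warn-then-cap behavior into one step:

package main

import (
    "fmt"

    "github.com/dustin/go-humanize"
)

func main() {
    memAvailable := uint64(8 << 30) // pretend 8 GiB was detected

    mlimit, err := humanize.ParseBytes("16GiB")
    if err != nil {
        panic(err)
    }
    if mlimit > memAvailable {
        // Mirrors the warning path: an over-large --memlimit is capped.
        fmt.Printf("WARNING: available (%s) smaller than --memlimit=16GiB, capping\n",
            humanize.IBytes(memAvailable))
        mlimit = memAvailable
    }
    fmt.Println("effective limit:", humanize.IBytes(mlimit))
}
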
@@ -852,125 +880,28 @@ func loadRootCredentials() {
|
||||
// Initialize KMS global variable after valiadating and loading the configuration.
// It depends on KMS env variables and global cli flags.
func handleKMSConfig() {
-    if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) {
-        logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
+    present, err := kms.IsPresent()
+    if err != nil {
+        logger.Fatal(err, "Invalid KMS configuration specified")
+    }
+    if !present {
+        return
    }

-    if env.IsSet(kms.EnvKMSSecretKey) {
-        KMS, err := kms.Parse(env.Get(kms.EnvKMSSecretKey, ""))
-        if err != nil {
-            logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment")
-        }
-        GlobalKMS = KMS
+    KMS, err := kms.Connect(GlobalContext, &kms.ConnectionOptions{
+        CADir: globalCertsCADir.Get(),
+    })
+    if err != nil {
+        logger.Fatal(err, "Failed to connect to KMS")
    }
-    if env.IsSet(kms.EnvKESEndpoint) {
-        if env.IsSet(kms.EnvKESAPIKey) {
-            if env.IsSet(kms.EnvKESClientKey) {
-                logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey))
-            }
-            if env.IsSet(kms.EnvKESClientCert) {
-                logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
-            }
-        }
-        if !env.IsSet(kms.EnvKESKeyName) {
-            logger.Fatal(errors.New("Invalid KES configuration"), fmt.Sprintf("The mandatory environment variable %q not set", kms.EnvKESKeyName))
-        }
-
-        var endpoints []string
-        for _, endpoint := range strings.Split(env.Get(kms.EnvKESEndpoint, ""), ",") {
-            if strings.TrimSpace(endpoint) == "" {
-                continue
-            }
-            if !ellipses.HasEllipses(endpoint) {
-                endpoints = append(endpoints, endpoint)
-                continue
-            }
-            patterns, err := ellipses.FindEllipsesPatterns(endpoint)
-            if err != nil {
-                logger.Fatal(err, fmt.Sprintf("Invalid KES endpoint %q", endpoint))
-            }
-            for _, lbls := range patterns.Expand() {
-                endpoints = append(endpoints, strings.Join(lbls, ""))
-            }
-        }
-        rootCAs, err := certs.GetRootCAs(env.Get(kms.EnvKESServerCA, globalCertsCADir.Get()))
-        if err != nil {
-            logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(kms.EnvKESServerCA, globalCertsCADir.Get())))
-        }
-
-        var kmsConf kms.Config
-        if env.IsSet(kms.EnvKESAPIKey) {
-            key, err := kes.ParseAPIKey(env.Get(kms.EnvKESAPIKey, ""))
-            if err != nil {
-                logger.Fatal(err, fmt.Sprintf("Failed to parse KES API key from %q", env.Get(kms.EnvKESAPIKey, "")))
-            }
-            kmsConf = kms.Config{
-                Endpoints:    endpoints,
-                DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
-                APIKey:       key,
-                RootCAs:      rootCAs,
-            }
-        } else {
-            loadX509KeyPair := func(certFile, keyFile string) (tls.Certificate, error) {
-                // Manually load the certificate and private key into memory.
-                // We need to check whether the private key is encrypted, and
-                // if so, decrypt it using the user-provided password.
-                certBytes, err := os.ReadFile(certFile)
-                if err != nil {
-                    return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
-                }
-                keyBytes, err := os.ReadFile(keyFile)
-                if err != nil {
-                    return tls.Certificate{}, fmt.Errorf("Unable to load KES client private key as specified by the shell environment: %v", err)
-                }
-                privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes))
-                if len(rest) != 0 {
-                    return tls.Certificate{}, errors.New("Unable to load KES client private key as specified by the shell environment: private key contains additional data")
-                }
-                if x509.IsEncryptedPEMBlock(privateKeyPEM) {
-                    keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(kms.EnvKESClientPassword, "")))
-                    if err != nil {
-                        return tls.Certificate{}, fmt.Errorf("Unable to decrypt KES client private key as specified by the shell environment: %v", err)
-                    }
-                    keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes})
-                }
-                certificate, err := tls.X509KeyPair(certBytes, keyBytes)
-                if err != nil {
-                    return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
-                }
-                return certificate, nil
-            }
-
-            reloadCertEvents := make(chan tls.Certificate, 1)
-            certificate, err := certs.NewCertificate(env.Get(kms.EnvKESClientCert, ""), env.Get(kms.EnvKESClientKey, ""), loadX509KeyPair)
-            if err != nil {
-                logger.Fatal(err, "Failed to load KES client certificate")
-            }
-            certificate.Watch(context.Background(), 15*time.Minute, syscall.SIGHUP)
-            certificate.Notify(reloadCertEvents)
-
-            kmsConf = kms.Config{
-                Endpoints:        endpoints,
-                DefaultKeyID:     env.Get(kms.EnvKESKeyName, ""),
-                Certificate:      certificate,
-                ReloadCertEvents: reloadCertEvents,
-                RootCAs:          rootCAs,
-            }
-        }
-
-        KMS, err := kms.NewWithConfig(kmsConf)
-        if err != nil {
-            logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
-        }
-        // We check that the default key ID exists or try to create it otherwise.
-        // This implicitly checks that we can communicate to KES. We don't treat
-        // a policy error as failure condition since MinIO may not have the permission
-        // to create keys - just to generate/decrypt data encryption keys.
-        if err = KMS.CreateKey(context.Background(), env.Get(kms.EnvKESKeyName, "")); err != nil && !errors.Is(err, kes.ErrKeyExists) && !errors.Is(err, kes.ErrNotAllowed) {
-            logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
-        }
-        GlobalKMS = KMS
+    if _, err = KMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{}); errors.Is(err, kms.ErrKeyNotFound) {
+        err = KMS.CreateKey(GlobalContext, &kms.CreateKeyRequest{Name: KMS.DefaultKey})
+    }
+    if err != nil && !errors.Is(err, kms.ErrKeyExists) && !errors.Is(err, kms.ErrPermission) {
+        logger.Fatal(err, "Failed to connect to KMS")
+    }
+    GlobalKMS = KMS
}
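The new initialization collapses the old environment-variable matrix into three steps: probe for any KMS configuration, connect once, then make a single GenerateKey round trip that doubles as a health check and triggers creation of the default key on first boot. A minimal sketch of that probe-and-create flow, assuming a stand-in kmsClient interface rather than the internal kms package's real types:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errKeyNotFound = errors.New("kms: key not found")

// kmsClient is a stand-in for the internal kms package's client.
type kmsClient interface {
	GenerateKey(ctx context.Context, name string) error
	CreateKey(ctx context.Context, name string) error
}

type fakeKMS struct{ keys map[string]bool }

func (f *fakeKMS) GenerateKey(_ context.Context, name string) error {
	if !f.keys[name] {
		return errKeyNotFound
	}
	return nil
}

func (f *fakeKMS) CreateKey(_ context.Context, name string) error {
	f.keys[name] = true
	return nil
}

// ensureDefaultKey mirrors the probe in the new handleKMSConfig: one
// GenerateKey round trip doubles as a health check, and a missing
// default key is created on first use.
func ensureDefaultKey(ctx context.Context, c kmsClient, defaultKey string) error {
	err := c.GenerateKey(ctx, defaultKey)
	if errors.Is(err, errKeyNotFound) {
		err = c.CreateKey(ctx, defaultKey)
	}
	return err
}

func main() {
	k := &fakeKMS{keys: map[string]bool{}}
	fmt.Println(ensureDefaultKey(context.Background(), k, "minio-default-key")) // <nil>
}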

func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {

@@ -56,7 +56,7 @@ import (
    "github.com/minio/minio/internal/crypto"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/env"
+    "github.com/minio/pkg/v3/env"
)

func initHelp() {
@@ -240,6 +240,11 @@ func initHelp() {
            Description: "manage Browser HTTP specific features, such as Security headers, etc.",
            Optional:    true,
        },
+        config.HelpKV{
+            Key:         config.ILMSubSys,
+            Description: "manage ILM settings for expiration and transition workers",
+            Optional:    true,
+        },
    }

    if globalIsErasure {
@@ -288,6 +293,7 @@ func initHelp() {
        config.DriveSubSys:   drive.HelpDrive,
        config.CacheSubSys:   cache.Help,
        config.BrowserSubSys: browser.Help,
+        config.ILMSubSys:     ilm.Help,
    }

    config.RegisterHelpSubSys(helpMap)
@@ -362,7 +368,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
        }
    case config.IdentityOpenIDSubSys:
        if _, err := openid.LookupConfig(s,
-            NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
+            NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
            return err
        }
    case config.IdentityLDAPSubSys:
@@ -383,7 +389,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
        }
    case config.IdentityPluginSubSys:
        if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
-            NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
+            NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
            return err
        }
    case config.SubnetSubSys:
@@ -530,10 +536,11 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
    // but not federation.
    globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled

-    globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
+    siteCfg, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
    if err != nil {
        configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
    }
+    globalSite.Update(siteCfg)

    globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled
    if globalAutoEncryption && GlobalKMS == nil {
@@ -566,6 +573,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
        return errServerNotInitialized
    }

+    var errs []error
    setDriveCounts := objAPI.SetDriveCounts()
    switch subSys {
    case config.APISubSys:
@@ -574,7 +582,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
            configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
        }

-        globalAPIConfig.init(apiConfig, setDriveCounts)
+        globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
        autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
        setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
    case config.CompressionSubSys:
@@ -588,26 +596,29 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
    case config.HealSubSys:
        healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
        if err != nil {
-            return fmt.Errorf("Unable to apply heal config: %w", err)
+            errs = append(errs, fmt.Errorf("Unable to apply heal config: %w", err))
+        } else {
+            globalHealConfig.Update(healCfg)
        }
-        globalHealConfig.Update(healCfg)
    case config.BatchSubSys:
        batchCfg, err := batch.LookupConfig(s[config.BatchSubSys][config.Default])
        if err != nil {
-            return fmt.Errorf("Unable to apply batch config: %w", err)
+            errs = append(errs, fmt.Errorf("Unable to apply batch config: %w", err))
+        } else {
+            globalBatchConfig.Update(batchCfg)
        }
-        globalBatchConfig.Update(batchCfg)
    case config.ScannerSubSys:
        scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
        if err != nil {
-            return fmt.Errorf("Unable to apply scanner config: %w", err)
+            errs = append(errs, fmt.Errorf("Unable to apply scanner config: %w", err))
+        } else {
+            // update dynamic scanner values.
+            scannerIdleMode.Store(scannerCfg.IdleMode)
+            scannerCycle.Store(scannerCfg.Cycle)
+            scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
+            scannerExcessFolders.Store(scannerCfg.ExcessFolders)
+            configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
        }
-        // update dynamic scanner values.
-        scannerIdleMode.Store(scannerCfg.IdleMode)
-        scannerCycle.Store(scannerCfg.Cycle)
-        scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
-        scannerExcessFolders.Store(scannerCfg.ExcessFolders)
-        configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
    case config.LoggerWebhookSubSys:
        loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys)
        if err != nil {
@@ -667,14 +678,12 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
                configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
                break
            }
-            // if we validated all setDriveCounts and it was successful
-            // proceed to store the correct storage class globally.
-            if i == len(setDriveCounts)-1 {
+            if i == 0 {
                globalStorageClass.Update(sc)
            }
        }
    case config.SubnetSubSys:
-        subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport)
+        subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalRemoteTargetTransport)
        if err != nil {
            configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
        } else {
@@ -693,11 +702,11 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
            }
        }
    case config.DriveSubSys:
-        if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
+        driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default])
+        if err != nil {
            configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
        } else {
-            err := globalDriveConfig.Update(driveConfig)
-            if err != nil {
+            if err = globalDriveConfig.Update(driveConfig); err != nil {
                configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
            }
        }
@@ -711,27 +720,32 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
    case config.BrowserSubSys:
        browserCfg, err := browser.LookupConfig(s[config.BrowserSubSys][config.Default])
        if err != nil {
-            return fmt.Errorf("Unable to apply browser config: %w", err)
+            errs = append(errs, fmt.Errorf("Unable to apply browser config: %w", err))
+        } else {
+            globalBrowserConfig.Update(browserCfg)
        }
-        globalBrowserConfig.Update(browserCfg)
    case config.ILMSubSys:
        ilmCfg, err := ilm.LookupConfig(s[config.ILMSubSys][config.Default])
        if err != nil {
-            return fmt.Errorf("Unable to apply ilm config: %w", err)
+            errs = append(errs, fmt.Errorf("Unable to apply ilm config: %w", err))
+        } else {
+            if globalTransitionState != nil {
+                globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
+            }
+            if globalExpiryState != nil {
+                globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
+            }
+            globalILMConfig.update(ilmCfg)
        }
-        if globalTransitionState != nil {
-            globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
-        }
-        if globalExpiryState != nil {
-            globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
-        }
-        globalILMConfig.update(ilmCfg)
    }
    globalServerConfigMu.Lock()
    defer globalServerConfigMu.Unlock()
    if globalServerConfig != nil {
        globalServerConfig[subSys] = s[subSys]
    }
+    if len(errs) > 0 {
+        return errors.Join(errs...)
+    }
    return nil
}
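The handler now accumulates per-subsystem failures instead of returning on the first one, so a single bad subsystem no longer blocks the remaining dynamic config from being applied. A self-contained sketch of the same accumulate-then-join pattern, using the standard library's errors.Join (Go 1.20+); the step names are illustrative:

package main

import (
	"errors"
	"fmt"
)

// applyAll runs every step even if some fail, then reports all failures
// as one joined error, mirroring the errs slice in the diff above.
func applyAll(steps map[string]func() error) error {
	var errs []error
	for name, step := range steps {
		if err := step(); err != nil {
			// Record and continue: later steps still run.
			errs = append(errs, fmt.Errorf("%s: %w", name, err))
		}
	}
	if len(errs) > 0 {
		return errors.Join(errs...)
	}
	return nil
}

func main() {
	err := applyAll(map[string]func() error{
		"heal":  func() error { return nil },
		"batch": func() error { return errors.New("bad config") },
	})
	fmt.Println(err) // batch: bad config
}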

@@ -746,41 +760,33 @@ func autoGenerateRootCredentials() {
        return
    }

-    if manager, ok := GlobalKMS.(kms.KeyManager); ok {
-        stat, err := GlobalKMS.Stat(GlobalContext)
-        if err != nil {
-            kmsLogIf(GlobalContext, err, "Unable to generate root credentials using KMS")
-            return
-        }
+    aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
+    if errors.Is(err, kes.ErrNotAllowed) || errors.Is(err, errors.ErrUnsupported) {
+        return // If we don't have permission to compute the HMAC, don't change the cred.
+    }
+    if err != nil {
+        logger.Fatal(err, "Unable to generate root access key using KMS")
+    }

-        aKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root access key"))
-        if errors.Is(err, kes.ErrNotAllowed) {
-            return // If we don't have permission to compute the HMAC, don't change the cred.
-        }
-        if err != nil {
-            logger.Fatal(err, "Unable to generate root access key using KMS")
-        }
+    sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
+    if err != nil {
+        // Here, we must have permission. Otherwise, we would have failed earlier.
+        logger.Fatal(err, "Unable to generate root secret key using KMS")
+    }

-        sKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root secret key"))
-        if err != nil {
-            // Here, we must have permission. Otherwise, we would have failed earlier.
-            logger.Fatal(err, "Unable to generate root secret key using KMS")
-        }
+    accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
+    if err != nil {
+        logger.Fatal(err, "Unable to generate root access key")
+    }
+    secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
+    if err != nil {
+        logger.Fatal(err, "Unable to generate root secret key")
+    }

-        accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
-        if err != nil {
-            logger.Fatal(err, "Unable to generate root access key")
-        }
-        secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
-        if err != nil {
-            logger.Fatal(err, "Unable to generate root secret key")
-        }

+    logger.Info("Automatically generated root access key and secret key with the KMS")
+    globalActiveCred = auth.Credentials{
+        AccessKey: accessKey,
+        SecretKey: secretKey,
+    }
-        logger.Info("Automatically generated root access key and secret key with the KMS")
-        globalActiveCred = auth.Credentials{
-            AccessKey: accessKey,
-            SecretKey: secretKey,
-        }
-    }

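Both keys are derived by MACing fixed labels with a KMS-held key and feeding the result into the credential generators as a deterministic byte stream, so every node sharing that KMS key derives the same root credentials. A sketch of the derivation, using crypto/hmac as a stand-in for the KMS MAC call; the alphabet, labels, and lengths here are illustrative, not MinIO's exact encoding:

package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
	"io"
)

const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" // illustrative charset

// deriveKey mimics the pattern above: an HMAC over a fixed label yields
// deterministic bytes, which seed the credential generator.
func deriveKey(secret []byte, label string, n int) (string, error) {
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(label))
	r := bytes.NewReader(mac.Sum(nil)) // 32 bytes of deterministic "randomness"
	b := make([]byte, n)
	if _, err := io.ReadFull(r, b); err != nil {
		return "", err
	}
	for i := range b {
		b[i] = alphabet[int(b[i])%len(alphabet)]
	}
	return string(b), nil
}

func main() {
	secret := []byte("kms-root-key") // stand-in for the KMS-held key
	access, _ := deriveKey(secret, "root access key", 20)
	fmt.Println(access) // identical on every node sharing the KMS key
}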
@@ -39,8 +39,8 @@ func TestServerConfig(t *testing.T) {
        t.Fatalf("Init Test config failed")
    }

-    if globalSite.Region != globalMinioDefaultRegion {
-        t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region)
+    if globalSite.Region() != globalMinioDefaultRegion {
+        t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region())
    }

    // Set new region and verify.
@@ -52,8 +52,8 @@ func TestServerConfig(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-    if site.Region != "us-west-1" {
-        t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region)
+    if site.Region() != "us-west-1" {
+        t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
    }

    if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {

@@ -33,8 +33,8 @@ import (
    "github.com/minio/minio/internal/config/storageclass"
    "github.com/minio/minio/internal/event/target"
    "github.com/minio/minio/internal/logger"
-    xnet "github.com/minio/pkg/v2/net"
-    "github.com/minio/pkg/v2/quick"
+    xnet "github.com/minio/pkg/v3/net"
+    "github.com/minio/pkg/v3/quick"
)

// Save config file to corresponding backend

@@ -27,7 +27,7 @@ import (
    "github.com/minio/minio/internal/config/policy/opa"
    "github.com/minio/minio/internal/config/storageclass"
    "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/quick"
+    "github.com/minio/pkg/v3/quick"
)

// FileLogger is introduced to workaround the dependency about logrus

@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -20,6 +20,7 @@ package cmd
import (
    "container/ring"
    "context"
+    "io"
    "sync"
    "sync/atomic"

@@ -28,8 +29,8 @@ import (
    "github.com/minio/minio/internal/logger/target/console"
    "github.com/minio/minio/internal/logger/target/types"
    "github.com/minio/minio/internal/pubsub"
-    "github.com/minio/pkg/v2/logger/message/log"
-    xnet "github.com/minio/pkg/v2/net"
+    "github.com/minio/pkg/v3/logger/message/log"
+    xnet "github.com/minio/pkg/v3/net"
)

// number of log messages to buffer
@@ -49,10 +50,10 @@ type HTTPConsoleLoggerSys struct {

// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
-func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
+func NewConsoleLogger(ctx context.Context, w io.Writer) *HTTPConsoleLoggerSys {
    return &HTTPConsoleLoggerSys{
        pubsub:  pubsub.New[log.Info, madmin.LogMask](8),
-        console: console.New(),
+        console: console.New(w),
        logBuf:  ring.New(defaultLogBufferCount),
    }
}

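Threading an io.Writer through the constructor makes the console target injectable: the server can pass its terminal writer while tests capture output in a buffer. A generic sketch of the pattern, with consoleSink standing in for the internal console target (not MinIO's real type):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// consoleSink writes wherever the caller points it, instead of assuming
// os.Stdout internally.
type consoleSink struct{ w io.Writer }

func newConsoleSink(w io.Writer) *consoleSink { return &consoleSink{w: w} }

func (c *consoleSink) Log(msg string) { fmt.Fprintln(c.w, msg) }

func main() {
	newConsoleSink(os.Stderr).Log("to the terminal")

	var buf bytes.Buffer
	newConsoleSink(&buf).Log("captured in a test")
	fmt.Print(buf.String())
}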
@@ -36,12 +36,14 @@ import (
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio/internal/bucket/lifecycle"
-    "github.com/minio/minio/internal/bucket/object/lock"
+    objectlock "github.com/minio/minio/internal/bucket/object/lock"
+    "github.com/minio/minio/internal/bucket/replication"
+    "github.com/minio/minio/internal/bucket/versioning"
    "github.com/minio/minio/internal/color"
    "github.com/minio/minio/internal/config/heal"
    "github.com/minio/minio/internal/event"
    xioutil "github.com/minio/minio/internal/ioutil"
-    "github.com/minio/pkg/v2/console"
+    "github.com/minio/pkg/v3/console"
    uatomic "go.uber.org/atomic"
)

@@ -51,7 +53,7 @@ const (
    dataScannerCompactLeastObject    = 500                              // Compact when there is less than this many objects in a branch.
    dataScannerCompactAtChildren     = 10000                            // Compact when there are this many children in a branch.
    dataScannerCompactAtFolders      = dataScannerCompactAtChildren / 4 // Compact when this many subfolders in a single folder.
-    dataScannerForceCompactAtFolders = 1_000_000                        // Compact when this many subfolders in a single folder (even top level).
+    dataScannerForceCompactAtFolders = 250_000                          // Compact when this many subfolders in a single folder (even top level).
    dataScannerStartDelay            = 1 * time.Minute                  // Time to wait on startup and between cycles.

    healDeleteDangling = true
@@ -62,11 +64,12 @@ var (
    globalHealConfig heal.Config

    // Sleeper values are updated when config is loaded.
-    scannerSleeper              = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
-    scannerCycle                = uatomic.NewDuration(dataScannerStartDelay)
-    scannerIdleMode             = uatomic.NewInt32(0) // default is throttled when idle
-    scannerExcessObjectVersions = uatomic.NewInt64(100)
-    scannerExcessFolders        = uatomic.NewInt64(50000)
+    scannerSleeper                       = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
+    scannerCycle                         = uatomic.NewDuration(dataScannerStartDelay)
+    scannerIdleMode                      = uatomic.NewInt32(0) // default is throttled when idle
+    scannerExcessObjectVersions          = uatomic.NewInt64(100)
+    scannerExcessObjectVersionsTotalSize = uatomic.NewInt64(1024 * 1024 * 1024 * 1024) // 1 TB
+    scannerExcessFolders                 = uatomic.NewInt64(50000)
)

// initDataScanner will start the scanner in the background.
@@ -346,6 +349,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca
        // No useful information...
        return cache, err
    }
+    s.newCache.forceCompact(dataScannerCompactAtChildren)
    s.newCache.Info.LastUpdate = UTCNow()
    s.newCache.Info.NextCycle = cache.Info.NextCycle
    return s.newCache, nil
@@ -951,10 +955,32 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
    }

    versionID := oi.VersionID
-    vcfg, _ := globalBucketVersioningSys.Get(i.bucket)
-    rCfg, _ := globalBucketObjectLockSys.Get(i.bucket)
-    replcfg, _ := getReplicationConfig(ctx, i.bucket)
-    lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, replcfg, oi)

+    var vc *versioning.Versioning
+    var lr objectlock.Retention
+    var rcfg *replication.Config
+    if !isMinioMetaBucketName(i.bucket) {
+        vc, err = globalBucketVersioningSys.Get(i.bucket)
+        if err != nil {
+            scannerLogOnceIf(ctx, err, i.bucket)
+            return
+        }
+
+        // Check if bucket is object locked.
+        lr, err = globalBucketObjectLockSys.Get(i.bucket)
+        if err != nil {
+            scannerLogOnceIf(ctx, err, i.bucket)
+            return
+        }
+
+        rcfg, err = getReplicationConfig(ctx, i.bucket)
+        if err != nil {
+            scannerLogOnceIf(ctx, err, i.bucket)
+            return
+        }
+    }
+
+    lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, lr, rcfg, oi)
    if i.debug {
        if versionID != "" {
            console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action)
@@ -968,11 +994,11 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
    // This can happen when,
    // - ExpireObjectAllVersions flag is enabled
    // - NoncurrentVersionExpiration is applicable
-    case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction:
+    case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
        size = 0
    case lifecycle.DeleteAction:
        // On a non-versioned bucket, DeleteObject removes the only version permanently.
-        if !vcfg.PrefixEnabled(oi.Name) {
+        if !vc.PrefixEnabled(oi.Name) {
            size = 0
        }
    }
@@ -1065,7 +1091,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
    }

    // Check if we have many versions after applyNewerNoncurrentVersionLimit.
-    if len(objInfos) > int(scannerExcessObjectVersions.Load()) {
+    if len(objInfos) >= int(scannerExcessObjectVersions.Load()) {
        // Notify object accessed via a GET request.
        sendEvent(eventArgs{
            EventName: event.ObjectManyVersions,
@@ -1089,6 +1115,39 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
        })
    }

+    cumulativeSize := int64(0)
+    for _, objInfo := range objInfos {
+        cumulativeSize += objInfo.Size
+    }
+    // Check if the cumulative size of all versions of this object is high.
+    if cumulativeSize >= scannerExcessObjectVersionsTotalSize.Load() {
+        // Notify object accessed via a GET request.
+        sendEvent(eventArgs{
+            EventName:  event.ObjectLargeVersions,
+            BucketName: i.bucket,
+            Object: ObjectInfo{
+                Name: i.objectPath(),
+            },
+            UserAgent: "Scanner",
+            Host:      globalLocalNodeName,
+            RespElements: map[string]string{
+                "x-minio-versions-count": strconv.Itoa(len(objInfos)),
+                "x-minio-versions-size":  strconv.FormatInt(cumulativeSize, 10),
+            },
+        })
+
+        auditLogInternal(context.Background(), AuditLogOptions{
+            Event:   "scanner:largeversions",
+            APIName: "Scanner",
+            Bucket:  i.bucket,
+            Object:  i.objectPath(),
+            Tags: map[string]interface{}{
+                "x-minio-versions-count": strconv.Itoa(len(objInfos)),
+                "x-minio-versions-size":  strconv.FormatInt(cumulativeSize, 10),
+            },
+        })
+    }

    return objInfos, nil
}

@@ -1104,7 +1163,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object

    // Note: objDeleted is true if and only if action ==
    // lifecycle.DeleteAllVersionsAction
-    if action == lifecycle.DeleteAllVersionsAction {
+    if action.DeleteAll() {
        return true, 0
    }

@@ -1141,17 +1200,15 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
        console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", event.Action)
    }

-    if event.Action == lifecycle.NoneAction {
-        return event
-    }

-    if obj.IsLatest && event.Action == lifecycle.DeleteAllVersionsAction {
-        if lr.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
+    switch event.Action {
+    case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
+        // Skip if bucket has object locking enabled; To prevent the
+        // possibility of violating an object retention on one of the
+        // noncurrent versions of this object.
+        if lr.LockEnabled {
            return lifecycle.Event{Action: lifecycle.NoneAction}
        }
-    }

-    switch event.Action {
    case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction:
        // Defensive code, should never happen
        if obj.VersionID == "" {
@@ -1176,36 +1233,29 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
    }

func applyTransitionRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) bool {
-    if obj.DeleteMarker {
+    if obj.DeleteMarker || obj.IsDir {
        return false
    }
    globalTransitionState.queueTransitionTask(obj, event, src)
    return true
}

-func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
-    var err error
-    defer func() {
-        if err != nil {
-            return
-        }
-        // Note: DeleteAllVersions action is not supported for
-        // transitioned objects
-        globalScannerMetrics.timeILM(lcEvent.Action)(1)
-    }()
-
-    if err = expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
+func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
+    timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
+    if err := expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
        if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
            return false
        }
        ilmLogIf(ctx, err)
        return false
    }
+    timeILM(1)

    // Notification already sent in *expireTransitionedObject*, just return 'true' here.
    return true
}

-func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
+func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
    traceFn := globalLifecycleSys.trace(obj)
    opts := ObjectOptions{
        Expiration: ExpirationOptions{Expire: true},
@@ -1227,17 +1277,19 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
        dobj ObjectInfo
        err  error
    )

+    timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
    defer func() {
-        if err != nil {
+        if !ok {
            return
        }

        if lcEvent.Action != lifecycle.NoneAction {
            numVersions := uint64(1)
-            if lcEvent.Action == lifecycle.DeleteAllVersionsAction {
+            if lcEvent.Action.DeleteAll() {
                numVersions = uint64(obj.NumVersions)
            }
-            globalScannerMetrics.timeILM(lcEvent.Action)(numVersions)
+            timeILM(numVersions)
        }
    }()

@@ -1262,8 +1314,11 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
    if obj.DeleteMarker {
        eventName = event.ObjectRemovedDeleteMarkerCreated
    }
-    if lcEvent.Action.DeleteAll() {
+    switch lcEvent.Action {
+    case lifecycle.DeleteAllVersionsAction:
        eventName = event.ObjectRemovedDeleteAllVersions
+    case lifecycle.DelMarkerDeleteAllVersionsAction:
+        eventName = event.ILMDelMarkerExpirationDelete
    }
    // Notify object deleted event.
    sendEvent(eventArgs{
@@ -1288,7 +1343,7 @@ func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)
    switch action := event.Action; action {
    case lifecycle.DeleteVersionAction, lifecycle.DeleteAction,
        lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction,
-        lifecycle.DeleteAllVersionsAction:
+        lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
        success = applyExpiryRule(event, src, obj)
    case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
        success = applyTransitionRule(event, src, obj)
@@ -1388,48 +1443,7 @@ func (d *dynamicSleeper) Timer(ctx context.Context) func() {
    t := time.Now()
    return func() {
        doneAt := time.Now()
-        for {
-            // Grab current values
-            d.mu.RLock()
-            minWait, maxWait := d.minSleep, d.maxSleep
-            factor := d.factor
-            cycle := d.cycle
-            d.mu.RUnlock()
-            elapsed := doneAt.Sub(t)
-            // Don't sleep for really small amount of time
-            wantSleep := time.Duration(float64(elapsed) * factor)
-            if wantSleep <= minWait {
-                return
-            }
-            if maxWait > 0 && wantSleep > maxWait {
-                wantSleep = maxWait
-            }
-            timer := time.NewTimer(wantSleep)
-            select {
-            case <-ctx.Done():
-                if !timer.Stop() {
-                    <-timer.C
-                }
-                if d.isScanner {
-                    globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
-                }
-                return
-            case <-timer.C:
-                if d.isScanner {
-                    globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
-                }
-                return
-            case <-cycle:
-                if !timer.Stop() {
-                    // We expired.
-                    <-timer.C
-                    if d.isScanner {
-                        globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
-                    }
-                    return
-                }
-            }
-        }
+        d.Sleep(ctx, doneAt.Sub(t))
    }
}

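The removed loop computed a proportional pause: sleep roughly factor times as long as the measured work took, skip negligible waits, and clamp to maxSleep; that logic is now delegated to d.Sleep. A sketch of just the computation, with the defaults visible in the vars above (factor 2, max wait one second):

package main

import (
	"fmt"
	"time"
)

// wantSleep reproduces the proportional throttle the Timer callback now
// delegates to Sleep: pause about factor times as long as the work took,
// skip tiny waits, and clamp to a maximum.
func wantSleep(elapsed time.Duration, factor float64, minWait, maxWait time.Duration) time.Duration {
	d := time.Duration(float64(elapsed) * factor)
	if d <= minWait {
		return 0 // not worth yielding for
	}
	if maxWait > 0 && d > maxWait {
		return maxWait
	}
	return d
}

func main() {
	// With factor 2, a 300ms scan pays a 600ms pause.
	fmt.Println(wantSleep(300*time.Millisecond, 2, 100*time.Millisecond, time.Second))
	// A 5s scan is clamped to the 1s maximum.
	fmt.Println(wantSleep(5*time.Second, 2, 100*time.Millisecond, time.Second))
}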
@@ -20,12 +20,15 @@ package cmd
import (
    "context"
    "encoding/xml"
+    "fmt"
+    "strings"
    "sync"
    "testing"
    "time"

+    "github.com/google/uuid"
    "github.com/minio/minio/internal/bucket/lifecycle"
    "github.com/minio/minio/internal/bucket/object/lock"
    "github.com/minio/minio/internal/bucket/versioning"
)

@@ -141,3 +144,96 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
        }
    }
}

+func TestEvalActionFromLifecycle(t *testing.T) {
+    // Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions
+    obj := ObjectInfo{
+        Name:        "foo",
+        ModTime:     time.Now().Add(-31 * 24 * time.Hour),
+        Size:        100 << 20,
+        VersionID:   uuid.New().String(),
+        IsLatest:    true,
+        NumVersions: 4,
+    }
+    delMarker := ObjectInfo{
+        Name:         "foo-deleted",
+        ModTime:      time.Now().Add(-61 * 24 * time.Hour),
+        Size:         0,
+        VersionID:    uuid.New().String(),
+        IsLatest:     true,
+        DeleteMarker: true,
+        NumVersions:  4,
+    }
+    deleteAllILM := `<LifecycleConfiguration>
+                        <Rule>
+                           <Expiration>
+                              <Days>30</Days>
+                              <ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>
+                           </Expiration>
+                           <Filter></Filter>
+                           <Status>Enabled</Status>
+                           <ID>DeleteAllVersions</ID>
+                        </Rule>
+                     </LifecycleConfiguration>`
+    delMarkerILM := `<LifecycleConfiguration>
+                       <Rule>
+                         <ID>DelMarkerExpiration</ID>
+                         <Filter></Filter>
+                         <Status>Enabled</Status>
+                         <DelMarkerExpiration>
+                           <Days>60</Days>
+                         </DelMarkerExpiration>
+                       </Rule>
+                     </LifecycleConfiguration>`
+    deleteAllLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(deleteAllILM))
+    if err != nil {
+        t.Fatalf("Failed to parse deleteAllILM test ILM policy %v", err)
+    }
+    delMarkerLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(delMarkerILM))
+    if err != nil {
+        t.Fatalf("Failed to parse delMarkerILM test ILM policy %v", err)
+    }
+    tests := []struct {
+        ilm       lifecycle.Lifecycle
+        retention lock.Retention
+        obj       ObjectInfo
+        want      lifecycle.Action
+    }{
+        {
+            // with object locking
+            ilm:       *deleteAllLc,
+            retention: lock.Retention{LockEnabled: true},
+            obj:       obj,
+            want:      lifecycle.NoneAction,
+        },
+        {
+            // without object locking
+            ilm:       *deleteAllLc,
+            retention: lock.Retention{},
+            obj:       obj,
+            want:      lifecycle.DeleteAllVersionsAction,
+        },
+        {
+            // with object locking
+            ilm:       *delMarkerLc,
+            retention: lock.Retention{LockEnabled: true},
+            obj:       delMarker,
+            want:      lifecycle.NoneAction,
+        },
+        {
+            // without object locking
+            ilm:       *delMarkerLc,
+            retention: lock.Retention{},
+            obj:       delMarker,
+            want:      lifecycle.DelMarkerDeleteAllVersionsAction,
+        },
+    }
+
+    for i, test := range tests {
+        t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) {
+            if got := evalActionFromLifecycle(context.TODO(), test.ilm, test.retention, nil, test.obj); got.Action != test.want {
+                t.Fatalf("Expected %v but got %v", test.want, got)
+            }
+        })
+    }
+}

@@ -18,7 +18,6 @@
package cmd

import (
-    "bytes"
    "context"
    "errors"
    "fmt"
@@ -36,7 +35,6 @@ import (
    "github.com/klauspost/compress/zstd"
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio/internal/bucket/lifecycle"
-    "github.com/minio/minio/internal/hash"
    "github.com/tinylib/msgp/msgp"
    "github.com/valyala/bytebufferpool"
)
@@ -731,6 +729,53 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
    }
}

+// forceCompact will force compact the cache of the top entry.
+// If the number of children is more than limit*100, it will compact self.
+// When above the limit a cleanup will also be performed to remove any possible abandoned entries.
+func (d *dataUsageCache) forceCompact(limit int) {
+    if d == nil || len(d.Cache) <= limit {
+        return
+    }
+    top := hashPath(d.Info.Name).Key()
+    topE := d.find(top)
+    if topE == nil {
+        scannerLogIf(GlobalContext, errors.New("forceCompact: root not found"))
+        return
+    }
+    // If off by 2 orders of magnitude, compact self and log error.
+    if len(topE.Children) > dataScannerForceCompactAtFolders {
+        // If we still have too many children, compact self.
+        scannerLogOnceIf(GlobalContext, fmt.Errorf("forceCompact: %q has %d children. Force compacting. Expect reduced scanner performance", d.Info.Name, len(topE.Children)), d.Info.Name)
+        d.reduceChildrenOf(hashPath(d.Info.Name), limit, true)
+    }
+    if len(d.Cache) <= limit {
+        return
+    }
+
+    // Check for abandoned entries.
+    found := make(map[string]struct{}, len(d.Cache))
+
+    // Mark all children recursively
+    var mark func(entry dataUsageEntry)
+    mark = func(entry dataUsageEntry) {
+        for k := range entry.Children {
+            found[k] = struct{}{}
+            if ch, ok := d.Cache[k]; ok {
+                mark(ch)
+            }
+        }
+    }
+    found[top] = struct{}{}
+    mark(*topE)
+
+    // Delete all entries not found.
+    for k := range d.Cache {
+        if _, ok := found[k]; !ok {
+            delete(d.Cache, k)
+        }
+    }
+}

// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
    // Remove bloom filter from print.
@@ -1005,11 +1050,23 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
        ctx, cancel := context.WithTimeout(ctx, timeout)
        defer cancel()

-        r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
+        r, err := store.GetObjectNInfo(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, name), nil, http.Header{}, ObjectOptions{NoLock: true})
        if err != nil {
            switch err.(type) {
            case ObjectNotFound, BucketNotFound:
-                return false, nil
+                r, err = store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
+                if err != nil {
+                    switch err.(type) {
+                    case ObjectNotFound, BucketNotFound:
+                        return false, nil
+                    case InsufficientReadQuorum, StorageErr:
+                        return true, nil
+                    }
+                    return false, err
+                }
+                err = d.deserialize(r)
+                r.Close()
+                return err != nil, nil
            case InsufficientReadQuorum, StorageErr:
                return true, nil
            }
@@ -1070,24 +1127,11 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
    }

    save := func(name string, timeout time.Duration) error {
-        hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len()))
-        if err != nil {
-            return err
-        }
-
        // Abandon if more than a minute, so we don't hold up scanner.
        ctx, cancel := context.WithTimeout(ctx, timeout)
        defer cancel()

-        _, err = store.PutObject(ctx,
-            dataUsageBucket,
-            name,
-            NewPutObjReader(hr),
-            ObjectOptions{NoLock: true})
-        if isErrBucketNotFound(err) {
-            return nil
-        }
-        return err
+        return saveConfig(ctx, store, pathJoin(bucketMetaPrefix, name), buf.Bytes())
    }
    defer save(name+".bkp", 5*time.Second) // Keep a backup as well

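The load path now looks in the bucket-metadata prefix first and only falls back to the legacy dataUsageBucket location, so caches written before an upgrade are still found. A sketch of that fallback order, with read as a stand-in for GetObjectNInfo and hypothetical path names:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// loadWithFallback mirrors the lookup order in the new load(): try the
// new metadata location first, then fall back to the legacy path.
func loadWithFallback(read func(path string) ([]byte, error), newPath, legacyPath string) ([]byte, error) {
	b, err := read(newPath)
	if errors.Is(err, errNotFound) {
		return read(legacyPath)
	}
	return b, err
}

func main() {
	store := map[string][]byte{"legacy/usage.cache": []byte("v1")} // pre-upgrade data
	read := func(p string) ([]byte, error) {
		if b, ok := store[p]; ok {
			return b, nil
		}
		return nil, errNotFound
	}
	b, err := loadWithFallback(read, "meta/buckets/usage.cache", "legacy/usage.cache")
	fmt.Println(string(b), err) // v1 <nil>
}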
@@ -79,12 +79,12 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
    prefixUsageCache.InitOnce(30*time.Second,
        // No need to fail upon Update() error, fallback to old value.
        cachevalue.Opts{ReturnLastGood: true, NoWait: true},
-        func() (map[string]uint64, error) {
+        func(ctx context.Context) (map[string]uint64, error) {
            m := make(map[string]uint64)
            for _, pool := range z.serverPools {
                for _, er := range pool.sets {
                    // Load bucket usage prefixes
-                    ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
+                    ctx, done := context.WithTimeout(ctx, 2*time.Second)
                    ok := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName) == nil
                    done()
                    if ok {
@@ -107,7 +107,7 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
        },
    )

-    return prefixUsageCache.Get()
+    return prefixUsageCache.GetWithCtx(ctx)
}

func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {

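The cachevalue update function now receives the caller's context and callers use GetWithCtx, so refresh timeouts derive from the request instead of context.Background(). A minimal sketch of such a context-aware cached value; the cached type here is illustrative, not the real cachevalue API:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// cached sketches the pattern in the diff: one refresher that receives
// the caller's context, with the last good value kept on failure.
type cached[T any] struct {
	mu      sync.Mutex
	val     T
	expires time.Time
	ttl     time.Duration
	update  func(ctx context.Context) (T, error)
}

func (c *cached[T]) GetWithCtx(ctx context.Context) (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Now().Before(c.expires) {
		return c.val, nil
	}
	v, err := c.update(ctx)
	if err != nil {
		return c.val, err // fall back to last good value
	}
	c.val, c.expires = v, time.Now().Add(c.ttl)
	return v, nil
}

func main() {
	c := &cached[int]{ttl: 30 * time.Second, update: func(ctx context.Context) (int, error) {
		ctx, cancel := context.WithTimeout(ctx, 2*time.Second) // bounded like the prefix-usage loader
		defer cancel()
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		default:
			return 42, nil
		}
	}}
	v, _ := c.GetWithCtx(context.Background())
	fmt.Println(v)
}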
@@ -22,7 +22,7 @@ import (

    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
)

// Data types used for returning dummy tagging XML.

@@ -110,7 +110,7 @@ func kmsKeyIDFromMetadata(metadata map[string]string) string {
//
// DecryptETags uses a KMS bulk decryption API, if available, which
// is more efficient than decrypting ETags sequentially.
-func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error {
+func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
    const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default.
    var (
        metadata = make([]map[string]string, 0, BatchSize)
@@ -267,7 +267,11 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
    if err != nil {
        return err
    }
-    oldKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)})
+    oldKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
+        Name:           keyID,
+        Ciphertext:     kmsKey,
+        AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
+    })
    if err != nil {
        return err
    }
@@ -276,7 +280,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
        return err
    }

-    newKey, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
+    newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
+        Name:           GlobalKMS.DefaultKey,
+        AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
+    })
    if err != nil {
        return err
    }
@@ -312,7 +319,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
    if _, ok := kmsCtx[bucket]; !ok {
        kmsCtx[bucket] = path.Join(bucket, object)
    }
-    newKey, err := GlobalKMS.GenerateKey(ctx, newKeyID, kmsCtx)
+    newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
+        Name:           newKeyID,
+        AssociatedData: kmsCtx,
+    })
    if err != nil {
        return err
    }
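These hunks migrate the KMS calls from positional arguments to request structs, which names every field at the call site and leaves room for new options without signature churn. A sketch of the shape of the new call, with placeholder types standing in for MinIO's internal kms package (no real cryptography here):

package main

import (
	"context"
	"fmt"
)

// Stand-ins for the request-struct API the diff migrates to; the real
// types live in MinIO's internal kms package.
type GenerateKeyRequest struct {
	Name           string
	AssociatedData map[string]string
}

type DEK struct{ Plaintext, Ciphertext []byte }

type KMS struct{ DefaultKey string }

func (k *KMS) GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error) {
	name := req.Name
	if name == "" {
		name = k.DefaultKey // an empty name falls back to the default key
	}
	return DEK{Plaintext: []byte(name)}, nil // placeholder, no real crypto
}

func main() {
	k := &KMS{DefaultKey: "minio-default-key"}
	// Old style was positional: GenerateKey(ctx, "", kms.Context{...}).
	// The struct form names every field at the call site:
	dek, _ := k.GenerateKey(context.Background(), &GenerateKeyRequest{
		AssociatedData: map[string]string{"bucket": "photos/2024"},
	})
	fmt.Println(string(dek.Plaintext))
}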
@@ -352,7 +362,9 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
    if GlobalKMS == nil {
        return crypto.ObjectKey{}, errKMSNotConfigured
    }
-    key, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
+    key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
+        AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
+    })
    if err != nil {
        return crypto.ObjectKey{}, err
    }
@@ -379,7 +391,10 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
    if _, ok := kmsCtx[bucket]; !ok {
        kmsCtx[bucket] = path.Join(bucket, object)
    }
-    key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsCtx)
+    key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
+        Name:           keyID,
+        AssociatedData: kmsCtx,
+    })
    if err != nil {
        if errors.Is(err, kes.ErrKeyNotFound) {
            return crypto.ObjectKey{}, errKMSKeyNotFound
@@ -475,11 +490,10 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m
func decryptObjectMeta(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) {
    switch kind, _ := crypto.IsEncrypted(metadata); kind {
    case crypto.S3:
-        KMS := GlobalKMS
-        if KMS == nil {
+        if GlobalKMS == nil {
            return nil, errKMSNotConfigured
        }
-        objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object)
+        objectKey, err := crypto.S3.UnsealObjectKey(GlobalKMS, metadata, bucket, object)
        if err != nil {
            return nil, err
        }
@@ -1011,7 +1025,9 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
    if encrypted {
        if crypto.SSEC.IsEncrypted(info.UserDefined) {
            if !(crypto.SSEC.IsRequested(headers) || crypto.SSECopy.IsRequested(headers)) {
-                return encrypted, errEncryptedObject
+                if r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" {
+                    return encrypted, errEncryptedObject
+                }
            }
        }

@@ -1063,13 +1079,16 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
}

// metadataDecrypter reverses metadataEncrypter.
-func (o *ObjectInfo) metadataDecrypter() objectMetaDecryptFn {
+func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
    return func(baseKey string, input []byte) ([]byte, error) {
        if len(input) == 0 {
            return input, nil
        }

-        key, err := decryptObjectMeta(nil, o.Bucket, o.Name, o.UserDefined)
+        var key []byte
+        if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
+            key = k[:]
+        }
+        key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined)
        if err != nil {
            return nil, err
        }
@@ -1081,13 +1100,13 @@ func (o *ObjectInfo) metadataDecrypter() objectMetaDecryptFn {

// decryptChecksums will attempt to decode checksums and return it/them if set.
// if part > 0, and we have the checksum for the part that will be returned.
-func (o *ObjectInfo) decryptPartsChecksums() {
+func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
    data := o.Checksum
    if len(data) == 0 {
        return
    }
    if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
-        decrypted, err := o.metadataDecrypter()("object-checksum", data)
+        decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
        if err != nil {
            encLogIf(GlobalContext, err)
            return
@@ -1143,15 +1162,17 @@ func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn

// decryptChecksums will attempt to decode checksums and return it/them if set.
// if part > 0, and we have the checksum for the part that will be returned.
-func (o *ObjectInfo) decryptChecksums(part int) map[string]string {
+func (o *ObjectInfo) decryptChecksums(part int, h http.Header) map[string]string {
    data := o.Checksum
    if len(data) == 0 {
        return nil
    }
    if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
-        decrypted, err := o.metadataDecrypter()("object-checksum", data)
+        decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
        if err != nil {
-            encLogIf(GlobalContext, err)
+            if err != crypto.ErrSecretKeyMismatch {
+                encLogIf(GlobalContext, err)
+            }
            return nil
        }
        data = decrypted

@@ -23,14 +23,13 @@ import (
    "net/url"
    "runtime"
    "sort"
-    "strconv"
    "strings"

    "github.com/cespare/xxhash/v2"
    "github.com/minio/minio-go/v7/pkg/set"
    "github.com/minio/minio/internal/config"
-    "github.com/minio/pkg/v2/ellipses"
-    "github.com/minio/pkg/v2/env"
+    "github.com/minio/pkg/v3/ellipses"
+    "github.com/minio/pkg/v3/env"
)

// This file implements and supports ellipses pattern for
@@ -134,7 +133,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
// on each index, this function also determines the final set size
// The final set size has the affinity towards choosing smaller
// indexes (total sets)
-func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) {
+func getSetIndexes(args []string, totalSizes []uint64, setDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) {
    if len(totalSizes) == 0 || len(args) == 0 {
        return nil, errInvalidArgument
    }
@@ -142,7 +141,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
    setIndexes = make([][]uint64, len(totalSizes))
    for _, totalSize := range totalSizes {
        // Check if totalSize has minimum range upto setSize
-        if totalSize < setSizes[0] || totalSize < customSetDriveCount {
+        if totalSize < setSizes[0] || totalSize < setDriveCount {
            msg := fmt.Sprintf("Incorrect number of endpoints provided %s", args)
            return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
        }
@@ -167,11 +166,11 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
    var setSize uint64
    // Custom set drive count allows to override automatic distribution.
    // only meant if you want to further optimize drive distribution.
-    if customSetDriveCount > 0 {
+    if setDriveCount > 0 {
        msg := fmt.Sprintf("Invalid set drive count. Acceptable values for %d number drives are %d", commonSize, setCounts)
        var found bool
        for _, ss := range setCounts {
-            if ss == customSetDriveCount {
+            if ss == setDriveCount {
                found = true
            }
        }
@@ -180,8 +179,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
        }

        // No automatic symmetry calculation expected, user is on their own
-        setSize = customSetDriveCount
-        globalCustomErasureDriveCount = true
+        setSize = setDriveCount
    } else {
        // Returns possible set counts with symmetry.
        setCounts = possibleSetCountsWithSymmetry(setCounts, argPatterns)
@@ -256,7 +254,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
// Parses all arguments and returns an endpointSet which is a collection
// of endpoints following the ellipses pattern, this is what is used
// by the object layer for initializing itself.
-func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
+func parseEndpointSet(setDriveCount uint64, args ...string) (ep endpointSet, err error) {
    argPatterns := make([]ellipses.ArgPattern, len(args))
    for i, arg := range args {
        patterns, perr := ellipses.FindEllipsesPatterns(arg)
@@ -266,7 +264,7 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe
        argPatterns[i] = patterns
    }

-    ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount, argPatterns)
+    ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), setDriveCount, argPatterns)
    if err != nil {
        return endpointSet{}, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
    }
@@ -281,23 +279,14 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe
// specific set size.
// For example: {1...64} is divided into 4 sets each of size 16.
// This applies to even distributed setup syntax as well.
-func GetAllSets(args ...string) ([][]string, error) {
-    var customSetDriveCount uint64
-    if v := env.Get(EnvErasureSetDriveCount, ""); v != "" {
-        driveCount, err := strconv.Atoi(v)
-        if err != nil {
-            return nil, config.ErrInvalidErasureSetSize(err)
-        }
-        customSetDriveCount = uint64(driveCount)
-    }
-
+func GetAllSets(setDriveCount uint64, args ...string) ([][]string, error) {
    var setArgs [][]string
    if !ellipses.HasEllipses(args...) {
        var setIndexes [][]uint64
        // Check if we have more one args.
        if len(args) > 1 {
            var err error
-            setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount, nil)
+            setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, setDriveCount, nil)
            if err != nil {
                return nil, err
            }
@@ -311,7 +300,7 @@ func GetAllSets(args ...string) ([][]string, error) {
        }
        setArgs = s.Get()
    } else {
-        s, err := parseEndpointSet(customSetDriveCount, args...)
+        s, err := parseEndpointSet(setDriveCount, args...)
        if err != nil {
            return nil, err
        }
@@ -336,8 +325,6 @@ const (
    EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"
)

-var globalCustomErasureDriveCount = false
-
type node struct {
    nodeName string
    disks    []string
@@ -366,8 +353,13 @@ func (el *endpointsList) add(arg string) error {
    return nil
}

+type poolArgs struct {
+    args          []string
+    setDriveCount uint64
+}
+
// buildDisksLayoutFromConfFile supports with and without ellipses transparently.
-func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err error) {
+func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err error) {
    if len(pools) == 0 {
        return layout, errInvalidArgument
    }
@@ -375,7 +367,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err
    for _, list := range pools {
        var endpointsList endpointsList

-        for _, arg := range list {
+        for _, arg := range list.args {
            switch {
            case ellipses.HasList(arg):
                patterns, err := ellipses.FindListPatterns(arg)
@@ -436,7 +428,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err
        }
    }

-    setArgs, err := GetAllSets(eps...)
+    setArgs, err := GetAllSets(list.setDriveCount, eps...)
    if err != nil {
        return layout, err
    }
@@ -469,15 +461,21 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {

    var setArgs [][]string

+    v, err := env.GetInt(EnvErasureSetDriveCount, 0)
+    if err != nil {
+        return err
+    }
+    setDriveCount := uint64(v)
+
    // None of the args have ellipses use the old style.
    if ok {
-        setArgs, err = GetAllSets(args...)
+        setArgs, err = GetAllSets(setDriveCount, args...)
        if err != nil {
            return err
        }
        ctxt.Layout = disksLayout{
            legacy: true,
-            pools:  []poolDisksLayout{{layout: setArgs}},
+            pools:  []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}},
        }
        return
    }
@@ -487,7 +485,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
        // TODO: support SNSD deployments to be decommissioned in future
        return fmt.Errorf("all args must have ellipses for pool expansion (%w) args: %s", errInvalidArgument, args)
    }
-    setArgs, err = GetAllSets(arg)
+    setArgs, err = GetAllSets(setDriveCount, arg)
    if err != nil {
        return err
    }
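GetAllSets now receives the set drive count explicitly instead of reading MINIO_ERASURE_SET_DRIVE_COUNT deep inside the call; the ellipses expansion itself is unchanged. A runnable example of that expansion using the public github.com/minio/pkg/v3/ellipses package the code above imports; the endpoint pattern is illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/minio/pkg/v3/ellipses"
)

func main() {
	arg := "http://node{1...4}/data{1...2}"
	if !ellipses.HasEllipses(arg) {
		fmt.Println("no pattern:", arg)
		return
	}
	patterns, err := ellipses.FindEllipsesPatterns(arg)
	if err != nil {
		panic(err)
	}
	// 4 nodes x 2 drives expand to 8 endpoints; GetAllSets would then slice
	// them into erasure sets, honoring the setDriveCount the caller now
	// resolves once from the environment and passes down.
	for _, lbls := range patterns.Expand() {
		fmt.Println(strings.Join(lbls, ""))
	}
}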
Some files were not shown because too many files have changed in this diff.