Compare commits

RELEASE.20... to RELEASE.20... (438 commits)
Only the SHA1 column of the commit table survives in this extract; the author and date cells are empty. The 438 commits, in listed order:

c4239ced22, f85c28e960, f7e176d4ca, 72a0d14195, 6abe4128d7, ed5ed7e490, 51410c9023, 96ca402dcd, 3da7c9cce3, a14e19ec54,
e091dde041, d10bb7e1b6, 7ebceacac6, 1593cb615d, 86a41d1631, d4157b819c, e0aceca1b7, 87804624fe, e029f8a9d7, 1bc6681176,
28322124e2, cbfe9de3e7, dc86b8d9d4, ba70118e2b, cb1d3e50f7, ded0b19d97, d0bb3dd136, ab7714b01e, e5b18df6db, 0abfd1bcb1,
6186d11761, e8b457e8a6, afea40cc0f, 402b798f1b, 4759532e90, 7f1e1713ab, b2c5819dbc, 2b0156b1fc, f6f0807c86, 0c53d86017,
6a6ee46d76, 974cbb3bb7, 03e996320e, 78fcb76294, 3d152015eb, ade8925155, 05a6c170bf, e1c2344591, 48a591e9b4, fa5d9c02ef,
5bd27346ac, 3c82cf9327, 70d40083e9, 8a30967542, 229f04ab79, 1123dc3676, 5bf41aff17, e47d787adb, 398ffb1136, 5862582cd7,
e36b1146d6, c28a4beeb7, 15ab0808b3, 3bae73fb42, bc527eceda, b963f36e1e, cdd7512a2e, a6d5287310, 22822f4151, 0b7aa6af87,
8c9ab85cfa, b1c849bedc, fb24bcfee0, 8268c12cfb, 3f39da48ea, 9d5cdaa2e3, 84e122c5c3, 261111e728, 0f1e8db4c5, 64e803b136,
a0f9e9f661, b6b7cddc9c, 241be9709c, 85f08d7752, 6be88a2b99, 060276932d, 6224849fd1, 9b79eec29e, c2e318dd40, 69258d5945,
d7ef6315ae, aaf4fb1184, f05641c3c6, 7a34c88d73, 6c746843ac, bb07df7e7b, 1cb824039e, 504e52b45e, 38c0840834, c65e67c357,
fb2360ff88, 1a2de1bdde, 993b97f1db, 1c4d28d7af, af55f37b27, 2d67c26794, 006cacfefb, c28f09d4a7, 73992d2b9f, a8f143298f,
2d44c161c7, 9511056f44, fb4ad000b6, a8ff12bc72, 1e1bd3afd9, 7b239ae154, 8a11282522, 85c3db3a93, 37383ecd09, 6378ca10a4,
9e81ccd2d9, 72cff79c8a, a5702f978e, 4687c4616f, d8dfb57d5c, b07c58aa05, cc0c41d216, f1302c40fe, 3b1aa40372, d96798ae7b,
b508264ac4, db78431b1d, 743ddb196a, 3ffeabdfcb, 51b1f41518, e7a56f35b9, 516af01a12, acdb355070, 37c02a5f7b, 04be352ae9,
53eb7656de, d8f0e0ea6e, 2e0fd2cba9, 909b169593, 4e67a4027e, 49055658a9, 89c58ce87d, 14876a4df1, 2681219039, 5da7f0100a,
dea9abed29, e3eb5c1328, fb9364f1fb, 6efb56851c, db2c7ed1d1, 74c047cb03, 50a5ad48fc, 292fccff6e, a9dc061d84, 01a8c09920,
4c8562bcec, f13c04629b, 80ff907d08, 673df6d517, 2d40433bc1, 3bc39db34e, a17f14f73a, 6651c655cb, 3ae104edae, c87a489514,
a60267501d, 641a56da0d, 59788e25c7, a16193bb50, 132e7413ba, 1966668066, 064f36ca5a, 15b609ecea, 4a1edfd9aa, b7f319b62a,
33c101544d, 21cf29330e, 3b21bb5be8, 6fe2b3f901, b368d4cc13, 0680af7414, 91805bcab6, c0e2886e37, 4f5dded4d4, b3a94c4e85,
8e618d45fc, 3ef59d2821, 23db4958f5, d9ee668b6d, 2e5d792f0c, b276651eaa, 3535197f99, 95f076340a, 698bb93a46, 2584430141,
ded373e600, e8c54c3d6c, f944a42886, eff0ea43aa, 3b602bb532, 459985f0fa, d0080046c2, 7fcb428622, 4ea6f94ed8, 83adc2eebf,
989c318a28, ef802f2b2c, f5d2fbc84c, e139673969, a8c6465f22, 27538e2d22, 6c6f0987dc, 5f64658faa, ce183cb2b4, b3bac73c0f,
e726d8ff0f, f4230777b3, 380233d646, d592bc0c1c, 0d0b0aa599, b433bf14ba, cf371da346, 107d951893, 22c53b1c70, 88926ad8e9,
32d04091a2, b6d4a77b94, be84a4fd68, 2ec1f404ac, 2040559f71, ca0ce4c6ef, 757cf413cb, b35acb3dbc, e404abf103, f7ff19cb18,
f736702da8, 91faaa1387, 68a9f521d5, f365a98029, 47bbc272df, aebac90013, 7ca4ba77c4, 13512170b5, 154fcaeb56, 722118386d,
709612cb37, b35d083872, 5e7b243bde, f8f9fc77ac, 499531f0b5, 3c2141513f, 602f6a9ad0, 22c5a5b91b, 41f508765d, 7dccd1f589,
55ff598b23, a22ce4550c, 168ae81b1f, 5f6a25cdd0, be97ae4c5d, 4d7d008741, 2d7a3d1516, dfab400d43, 70078eab10, 3415c4dd1e,
e200808ab7, fae563b85d, 3e6dc02f8f, 95e4cbbfde, 2825294b7b, bce93b5cfa, 7a4b250c8b, e5335450a4, a6ffdf1dd4, 69e41f87ef,
ee48f9f206, 9ba39d7fad, d2fb371f80, 2f9018f03b, eb990f64a9, bbb64eaade, 7bd1d899bc, c91d1ec2e3, c50b64027d, 20960b6a2d,
3bd3470d0b, ba39ed9af7, 62e6dc950d, 5a5046ce45, ad04afe381, ba9f0f2480, d06b63d056, 7ce28c3b1d, e3ac4035b9, d21b6daa49,
76ebb16688, 55aa431578, 614981e566, b8b956a05d, d2eed44c78, 789cbc6fb2, 0662c90b5c, a2cab02554, 6c7a21df6b, f933b0b708,
9f305273a7, cbd9efcb43, 29a25a538f, 2dd8faaedc, f00187033d, c5141d65ac, 069c4015cd, 2f6e03fb60, 0fbb945e13, b94dd835c9,
44fc707423, 5aaef9790f, 7edc352d23, 850a84b08a, 4148754ce0, 2107722829, d326ba52e9, 91e1487de4, 5ffb2a9605, 17fe91d6d1,
90a9f2dd70, d5e48cfd65, d274566463, 39ac720826, 21b6204692, d98faeb26a, 0a63dc199c, 3ba857dfa1, a8554c4022, ba54b39c02,
2a75225569, e72429c79c, c5b3f5553f, d3ae0aaad3, d67bccf861, 1277ad69a6, 8f93e81afb, 4af31e654b, aad50579ba, 38d059b0ae,
bd4eeb4522, 03e3493288, 64baedf5a4, 2f64d5f77e, f79a4ef4d0, 2d53854b19, e5c83535af, c904ef966e, 8f266e0772, e0fe7cc391,
9d20dec56a, 597a785253, 7d75b1e758, 5f78691fcf, a591e06ae5, 443c93c634, 5659cddc84, 2a03a34bde, 1654a9b7e6, 673a521711,
2e23076688, b92ac55250, 7981509cc8, 6d5bc045bc, d38e020b29, 7d29030292, 7c7650b7c3, ca80eced24, d0e0b81d8e, 391baa1c9a,
ae14681c3e, 4d698841f4, 9906b3ade9, bf1769d3e0, 63e1ad9f29, 2c7bcee53f, 1fd90c93ff, e947a844c9, 4e2d39293a, 1228d6bf1a,
fc4561c64c, 3b7747b42b, e432e79324, 08d74819b6, aa3fde1784, 0b3eb7f218, 69c9496c71, b792b36495, d3db7d31a3, c05ca63158,
6d3e0c7db6, 0e59e50b39, d4b391de1b, de4d3dac00, 534e7161df, 9b219cd646, 3bab4822f3, 3c5f2d8916, 5808190398, b2a82248b1,
4e5fcca8b9, c36eaedb93, 7752b03add, 01bfc78535, 074d70112d, e8d14c0d90, 60d7e8143a, 9667a170de, abae30f9e1, f9311bc9d1,
b598402738, bd026b913f, 72ff69d9bb, f30417d9a8, 47a4ad3cd7, 2f7a10ab31, b534dc69ab, 7b7d2ea7d4, e00de1c302, 3549e583a6,
f5e3eedf34, 519dbfebf6, 9a267f9270, 67bd71b7a5, ec49fff583, 8b660e18f2, 981497799a, b9bdc17465
`.github/workflows/go-cross.yml` (vendored, 3 changed lines)

```diff
@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - next
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
```
`.github/workflows/go-fips.yml` (vendored, 5 changed lines)

```diff
@@ -3,8 +3,7 @@ name: FIPS Build Test
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
```
`.github/workflows/go-healing.yml` (vendored, 5 changed lines)

```diff
@@ -3,8 +3,7 @@ name: Healing Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
```
`.github/workflows/go-lint.yml` (vendored, 18 changed lines; the Windows leg of the matrix is dropped)

```diff
@@ -3,12 +3,11 @@ name: Linters and Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
-concurrency:
+concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
@@ -21,23 +20,14 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
-        os: [ubuntu-latest, windows-latest]
+        go-version: [1.22.x]
+        os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
-      - name: Build on ${{ matrix.os }}
-        if: matrix.os == 'windows-latest'
-        env:
-          CGO_ENABLED: 0
-          GO111MODULE: on
-        run: |
-          netsh int ipv4 set dynamicport tcp start=60000 num=61000
-          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
-          go test -v --timeout 50m ./...
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
```
`.github/workflows/go.yml` (vendored, 5 changed lines)

```diff
@@ -3,8 +3,7 @@ name: Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
```
`.github/workflows/helm-lint.yml` (vendored, 5 changed lines)

```diff
@@ -3,8 +3,7 @@ name: Helm Chart linting
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -23,7 +22,7 @@
         uses: actions/checkout@v4
 
       - name: Install Helm
-        uses: azure/setup-helm@v3
+        uses: azure/setup-helm@v4
 
       - name: Run helm lint
         run: |
```
`.github/workflows/iam-integrations.yaml` (vendored, 37 changed lines; two new import jobs)

```diff
@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - next
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -62,7 +61,7 @@
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
       # the tests.
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         ldap: ["", "localhost:389"]
         etcd: ["", "http://localhost:2379"]
         openid: ["", "http://127.0.0.1:5556/dex"]
@@ -126,3 +125,37 @@
         if: matrix.openid == 'http://127.0.0.1:5556/dex'
         run: |
           make test-site-replication-oidc
+  iam-import-with-missing-entities:
+    name: Test IAM import in new cluster with missing entities
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster there are missing groups etc
+        run: |
+          make test-iam-import-with-missing-entities
+  iam-import-with-openid:
+    name: Test IAM import in new cluster with opendid configurations
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster with openid configurations
+        run: |
+          make test-iam-import-with-openid
```
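The two new import jobs drive `make` targets added in this same release (see the Makefile diff later in this compare). A rough local equivalent, assuming the helper repo is checked out under `./minio-iam-testing` the way the workflow arranges it:

```sh
# Sketch: run the new IAM import checks outside CI.
git clone https://github.com/minio/minio-iam-testing minio-iam-testing
make test-iam-import-with-missing-entities
make test-iam-import-with-openid
```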
`.github/workflows/mint.yml` (vendored, 8 changed lines; the resiliency step lands commented out)

```diff
@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - next
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -30,7 +29,7 @@
       - name: setup-go-step
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.22.x
 
       - name: github sha short
         id: vars
@@ -56,6 +55,11 @@
         run: |
           ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
 
+      # FIXME: renable this back when we have a valid way to add deadlines for PUT()s (internode CreateFile)
+      # - name: resiliency
+      #   run: |
+      #     ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
+
       - name: The job must cleanup
         if: ${{ always() }}
         run: |
```
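For reference, the erasure step can be approximated outside CI. The `sha_short` value is just the abbreviated commit hash produced by the `github sha short` step (its exact derivation is not shown in this extract, so the line below is a plausible stand-in):

```sh
# Minimal local approximation of the mint job's erasure run (values illustrative).
export GITHUB_WORKSPACE=$PWD
sha_short=$(git rev-parse --short HEAD)
"${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh" "erasure" "minio" "minio123" "${sha_short}"
```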
`.github/workflows/mint/minio-resiliency.yaml` (vendored, new file, 78 lines)

```yaml
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: quay.io/minio/minio:${JOB_NAME}
  command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
  expose:
    - "9000"
    - "9001"
  environment:
    MINIO_CI_CD: "on"
    MINIO_ROOT_USER: "minio"
    MINIO_ROOT_PASSWORD: "minio123"
    MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
    MINIO_DRIVE_MAX_TIMEOUT: "5s"
  healthcheck:
    test: ["CMD", "mc", "ready", "local"]
    interval: 5s
    timeout: 5s
    retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio1
    volumes:
      - rdata1-1:/rdata1
      - rdata1-2:/rdata2

  minio2:
    <<: *minio-common
    hostname: minio2
    volumes:
      - rdata2-1:/rdata1
      - rdata2-2:/rdata2

  minio3:
    <<: *minio-common
    hostname: minio3
    volumes:
      - rdata3-1:/rdata1
      - rdata3-2:/rdata2

  minio4:
    <<: *minio-common
    hostname: minio4
    volumes:
      - rdata4-1:/rdata1
      - rdata4-2:/rdata2

  nginx:
    image: nginx:1.19.2-alpine
    hostname: nginx
    volumes:
      - ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "9000:9000"
      - "9001:9001"
    depends_on:
      - minio1
      - minio2
      - minio3
      - minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  rdata1-1:
  rdata1-2:
  rdata2-1:
  rdata2-2:
  rdata3-1:
  rdata3-2:
  rdata4-1:
  rdata4-2:
```
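`run-mint.sh` consumes this file by mode name, so a resiliency run reduces to the following; `JOB_NAME` selects the image tag, matching the `image:` line above (the tag value here is illustrative):

```sh
# Sketch: bring the 4-node resiliency topology up and wedge one node,
# mirroring what run-mint.sh does for MODE=resiliency.
export JOB_NAME=dev                                    # hypothetical image tag
cd .github/workflows/mint
docker-compose -f minio-resiliency.yaml up -d
docker-compose -f minio-resiliency.yaml pause minio4   # one node goes dark
```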
`.github/workflows/mint/nginx-4-node.conf` (vendored, 7 changed lines)

```diff
@@ -23,10 +23,9 @@ http {
     # include /etc/nginx/conf.d/*.conf;
 
     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
     }
 
     upstream console {
```
`.github/workflows/mint/nginx-8-node.conf` (vendored, 16 changed lines)

```diff
@@ -23,14 +23,14 @@ http {
     # include /etc/nginx/conf.d/*.conf;
 
     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
-        server minio5:9000;
-        server minio6:9000;
-        server minio7:9000;
-        server minio8:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
+        server minio5:9000 max_fails=1 fail_timeout=10s;
+        server minio6:9000 max_fails=1 fail_timeout=10s;
+        server minio7:9000 max_fails=1 fail_timeout=10s;
+        server minio8:9000 max_fails=1 fail_timeout=10s;
     }
 
     upstream console {
```
`.github/workflows/mint/nginx.conf` (vendored, 8 changed lines)

```diff
@@ -23,10 +23,10 @@ http {
     # include /etc/nginx/conf.d/*.conf;
 
     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
     }
 
     upstream console {
```
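All three upstream blocks gain `max_fails=1 fail_timeout=10s`, which tells nginx to stop routing to a backend for 10 seconds after a single failed attempt instead of retrying it on live traffic. Note also that the 4-node config now lists only three upstream servers; the omitted `minio4` is the node that `run-mint.sh` pauses in resiliency mode. A quick manual probe of the behavior (the alias name and credentials below are hypothetical, matching the compose file's root user):

```sh
# Sketch: verify S3 calls still succeed while one backend is ejected.
mc alias set resiliency http://127.0.0.1:9000 minio minio123
docker-compose -f minio-resiliency.yaml pause minio2
mc ready resiliency && mc ls resiliency/   # should succeed via the healthy upstreams
```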
`.github/workflows/multipart/migrate.sh` (vendored, 16 changed lines)

```diff
@@ -24,8 +24,6 @@ if [ ! -f ./mc ]; then
 	chmod +x mc
 fi
 
-go install -v github.com/minio/minio/docs/debugging/s3-check-md5@latest
-
 export RELEASE=RELEASE.2023-08-29T23-07-35Z
 
 docker-compose -f docker-compose-site1.yaml up -d
@@ -45,10 +43,10 @@ sleep 30s
 
 sleep 5
 
-s3-check-md5 -h
+./s3-check-md5 -h
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"
@@ -64,8 +62,8 @@ fi
 
 sleep 5
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 ## we do not need to fail here, since we are going to test
 ## upgrading to master, healing and being able to recover
@@ -93,8 +91,8 @@ for i in $(seq 1 10); do
 	./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
 done
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"
```
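The change here is twofold: the script stops `go install`-ing `s3-check-md5` (so `$GOPATH/bin` no longer needs to be on `PATH`) and invokes a `./s3-check-md5` binary from the working directory instead. A sketch of producing that local binary, assuming the commands run inside a checkout of the minio repository:

```sh
# Build the checker into the working directory so ./s3-check-md5 resolves.
go build -o ./s3-check-md5 github.com/minio/minio/docs/debugging/s3-check-md5
./s3-check-md5 -h
```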
`.github/workflows/replication.yaml` (vendored, 17 changed lines; PBAC and multipart suites added)

```diff
@@ -3,8 +3,7 @@ name: MinIO advanced tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -22,7 +21,7 @@
 
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
 
     steps:
       - uses: actions/checkout@v4
@@ -42,6 +41,12 @@
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-ilm
 
+      - name: Test PBAC
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-pbac
+
       - name: Test Config File
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
@@ -65,3 +70,9 @@
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-versioning
+
+      - name: Test Multipart upload with failures
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-multipart
```
`.github/workflows/root-disable.yml` (vendored, 5 changed lines)

```diff
@@ -3,8 +3,7 @@ name: Root lockdown tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
 
     steps:
```
`.github/workflows/run-mint.sh` (vendored, 17 changed lines; the pause step is live, the unpause re-run is parked behind a FIXME)

```diff
@@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 cd .github/workflows/mint
 
 docker-compose -f minio-${MODE}.yaml up -d
-sleep 30s
+sleep 1m
 
 docker system prune -f || true
 docker volume prune -f || true
@@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
 
+# Pause one node, to check that all S3 calls work while one node goes wrong
+[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
+
 docker run --rm --net=mint_default \
 	--name="mint-${MODE}-${JOB_NAME}" \
 	-e SERVER_ENDPOINT="nginx:9000" \
@@ -35,6 +38,18 @@ docker run --rm --net=mint_default \
 	-e MINT_MODE="${MINT_MODE}" \
 	docker.io/minio/mint:edge
 
+# FIXME: enable this after fixing aws-sdk-java-v2 tests
+# # unpause the node, to check that all S3 calls work while one node goes wrong
+# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
+# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
+#	--name="mint-${MODE}-${JOB_NAME}" \
+#	-e SERVER_ENDPOINT="nginx:9000" \
+#	-e ACCESS_KEY="${ACCESS_KEY}" \
+#	-e SECRET_KEY="${SECRET_KEY}" \
+#	-e ENABLE_HTTPS=0 \
+#	-e MINT_MODE="${MINT_MODE}" \
+#	docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml down || true
 sleep 10s
```
`.github/workflows/shfmt.yml` (vendored, 3 changed lines)

```diff
@@ -3,8 +3,7 @@ name: Shell formatting checks
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 permissions:
   contents: read
```
`.github/workflows/upgrade-ci-cd.yaml` (vendored, 5 changed lines)

```diff
@@ -3,8 +3,7 @@ name: Upgrade old version tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
 
     steps:
```
`.github/workflows/vulncheck.yml` (vendored, 3 changed lines)

```diff
@@ -21,8 +21,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.9
-          check-latest: true
+          go-version: 1.22.7
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash
```
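The job pins Go 1.22.7 (dropping `check-latest`) and then runs the stock scanner, so reproducing it locally is two commands:

```sh
# Same scan the workflow performs, using the locally installed Go toolchain.
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
```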
`.gitignore` (vendored, 11 changed lines)

```diff
@@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
 docs/debugging/pprofgoparser/pprofgoparser
 docs/debugging/reorder-disks/reorder-disks
 docs/debugging/populate-hard-links/populate-hardlinks
-docs/debugging/xattr/xattr
+docs/debugging/xattr/xattr
+hash-set
+healing-bin
+inspect
+pprofgoparser
+reorder-disks
+s3-check-md5
+s3-verify
+xattr
+xl-meta
```
||||
@@ -2,6 +2,9 @@
|
||||
extend-exclude = [
|
||||
".git/",
|
||||
"docs/",
|
||||
"CREDITS",
|
||||
"go.mod",
|
||||
"go.sum",
|
||||
]
|
||||
ignore-hidden = false
|
||||
|
||||
@@ -16,6 +19,7 @@ extend-ignore-re = [
|
||||
"MIIDBTCCAe2gAwIBAgIQWHw7h.*",
|
||||
'http\.Header\{"X-Amz-Server-Side-Encryptio":',
|
||||
"ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
|
||||
"ERRO:",
|
||||
]
|
||||
|
||||
[default.extend-words]
|
||||
|
||||
A contributing-guide hunk (file header not preserved):

````diff
@@ -12,8 +12,9 @@ Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to
 
 ```sh
 git clone https://github.com/minio/minio
 cd minio
 go install -v
-ls /go/bin/minio
+ls $(go env GOPATH)/bin/minio
 ```
 
 ### Set up git remote as ``upstream``
````
A Dockerfile hunk (file header not preserved):

```diff
@@ -1,5 +1,7 @@
 FROM minio/minio:latest
 
+RUN chmod -R 777 /usr/bin
+
 COPY ./minio /usr/bin/minio
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
```
A Dockerfile deleted in its entirety (file header not preserved):

```diff
@@ -1,12 +0,0 @@
-FROM minio/minio:latest
-
-ENV PATH=/opt/bin:$PATH
-
-COPY ./minio /opt/bin/minio
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-
-VOLUME ["/data"]
-
-CMD ["minio"]
```
A Dockerfile hunk for hotfix builds, judging by the `dl.min.io/server/minio/hotfixes` URLs (file header not preserved):

```diff
@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.22-alpine as build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc
 
 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
```
A second Dockerfile hunk (file header not preserved; this variant builds in `/build` and fetches the static curl via a helper script). Marker placement on the curl-related lines is inferred from the hunk counts:

```diff
@@ -1,35 +1,39 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.22-alpine AS build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
+
+WORKDIR /build
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
+    apk add -U --no-cache bash && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc
 
-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-        curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-        chmod +x /go/bin/curl; \
-    fi
-
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
     minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
 
+COPY dockerscripts/download-static-curl.sh /build/download-static-curl
+RUN chmod +x /build/download-static-curl && \
+    /build/download-static-curl
+
 FROM registry.access.redhat.com/ubi9/ubi-micro:latest
 
 ARG RELEASE
@@ -51,10 +55,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
-COPY --from=build /go/bin/cur* /usr/bin/
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/curl* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
```
A FIPS-build Dockerfile hunk, judging by the `.fips` artifact names (file header not preserved; the `mc` download block is new):

```diff
@@ -1,21 +1,28 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.22-alpine AS build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
+# Download mc binary and signature files
+RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.fips -o /go/bin/mc && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.fips.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.fips.sha256sum -o /go/bin/mc.sha256sum && \
+    chmod +x /go/bin/mc
+
 RUN if [ "$TARGETARCH" = "amd64" ]; then \
     curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
     chmod +x /go/bin/curl; \
@@ -44,8 +51,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
     MINIO_CONFIG_ENV_FILE=config.env
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
```
A release Dockerfile hunk (file header not preserved; same pattern as the hotfix variant, but against the `release` download path):

```diff
@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.22-alpine AS build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
    chmod +x /go/bin/mc
 
 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
```
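Every build stage verifies the downloaded binaries with minisign against the public key embedded in the comment, and now also fetches the `.sha256sum` files. The same check can be done by hand; the release tag below is a placeholder:

```sh
# Sketch: verify a downloaded server binary the way the Dockerfiles do.
RELEASE="RELEASE.YYYY-MM-DDTHH-MM-SSZ"   # hypothetical tag; substitute a real one
BASE="https://dl.min.io/server/minio/release/linux-amd64/archive"
curl -s -q "${BASE}/minio.${RELEASE}" -o minio
curl -s -q "${BASE}/minio.${RELEASE}.minisig" -o minio.minisig
minisign -Vqm minio -x minio.minisig \
	-P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && echo "signature OK"
```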
`Makefile` (68 changed lines; functional targets now depend on `install-race` instead of rebuilding inline)

```diff
@@ -24,7 +24,7 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
 	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
-	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.10-0.20240227114326-6d6f813fff1b
+	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.2.3-0.20241022140105-4558fbf3a223
	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
 
 crosscompile: ## cross compile minio
@@ -34,20 +34,23 @@ verifiers: lint check-gen
 
 check-gen: ## check for updated autogenerated files
 	@go generate ./... >/dev/null
 	@go mod tidy -compat=1.21
 	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
 	@(! git diff --name-only | grep 'go.sum') || (echo "Non-committed changes in auto-generated go.sum is detected, please commit them to proceed." && false)
 
 lint: getdeps ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
 	@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@command typos && typos ./ || echo "typos binary is not found.. skipping.."
+
+lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
+	@echo "Running $@ check"
+	@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix
 
 check: test
-test: verifiers build build-debugging ## builds minio, runs linters, tests
+test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./...
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./...
 
 test-root-disable: install-race
 	@echo "Running minio root lockdown tests"
@@ -57,12 +60,17 @@ test-ilm: install-race
 	@echo "Running ILM tests"
 	@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh
 
+test-pbac: install-race
+	@echo "Running bucket policies tests"
+	@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
+
 test-decom: install-race
 	@echo "Running minio decom tests"
 	@env bash $(PWD)/docs/distributed/decom.sh
 	@env bash $(PWD)/docs/distributed/decom-encrypted.sh
 	@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
 	@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
+	@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh
 
 test-versioning: install-race
 	@echo "Running minio versioning tests"
@@ -79,16 +87,24 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)
 
-test-iam: build ## verify IAM (external IDP, etcd backends)
+test-iam: install-race ## verify IAM (external IDP, etcd backends)
 	@echo "Running tests for IAM (external IDP, etcd backends)"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd
 	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd
 
-test-iam-ldap-upgrade-import: build ## verify IAM (external LDAP IDP)
+test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
 	@echo "Running upgrade tests for IAM (LDAP backend)"
 	@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh
 
+test-iam-import-with-missing-entities: install-race ## test import of external iam config withg missing entities
+	@echo "Test IAM import configurations with missing entities"
+	@env bash $(PWD)/docs/distributed/iam-import-with-missing-entities.sh
+
+test-iam-import-with-openid: install-race
+	@echo "Test IAM import configurations with openid"
+	@env bash $(PWD)/docs/distributed/iam-import-with-openid.sh
+
 test-sio-error:
 	@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
 
@@ -101,7 +117,10 @@ test-replication-3site:
 test-delete-replication:
 	@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)
 
-test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
+test-delete-marker-proxying:
+	@(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh)
+
+test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication
 	@echo "Running tests for replicating three sites"
 
 test-site-replication-ldap: install-race ## verify automatic site replication
@@ -122,37 +141,36 @@ test-site-replication-minio: install-race ## verify automatic site replication
 	@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
 	@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)
 
-verify: ## verify minio various setups
+test-multipart: install-race ## test multipart
+	@echo "Test multipart behavior when part files are missing"
+	@(env bash $(PWD)/buildscripts/multipart-quorum-test.sh)
+
+verify: install-race ## verify minio various setups
 	@echo "Verifying build with race"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
-verify-healing: ## verify healing and replacing disks with minio binary
+verify-healing: install-race ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
 	@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
 	@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)
 
-verify-healing-with-root-disks: ## verify healing root disks
+verify-healing-with-root-disks: install-race ## verify healing root disks
 	@echo "Verify healing with root drives"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
 
-verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
+verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
 	@echo "Verify healing with rewrite"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)
 
-verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
+verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
 	@echo "Verify resolving inconsistent versions build with race"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
 
 build-debugging:
 	@(env bash $(PWD)/docs/debugging/build.sh)
 
-build: checks ## builds minio to $(PWD)
+build: checks build-debugging ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
 	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 
@@ -162,9 +180,9 @@ hotfix-vars:
 	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
 
 hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
-	@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.9/pkger_2.2.9_linux_amd64.deb
+	@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.1/pkger_2.3.1_linux_amd64.deb
 	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
-	@sudo apt install ./pkger_2.2.9_linux_amd64.deb --yes
+	@sudo apt install ./pkger_2.3.1_linux_amd64.deb --yes
 	@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
 	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
 	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@@ -191,15 +209,15 @@ docker: build ## builds minio docker container
 	@echo "Building minio docker image '$(TAG)'"
 	@docker build -q --no-cache -t $(TAG) . -f Dockerfile
 
-install-race: checks ## builds minio to $(PWD)
+install-race: checks build-debugging ## builds minio to $(PWD)
 	@echo "Building minio binary with -race to './minio'"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
 
 install: build ## builds minio and installs it to $GOPATH/bin.
 	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
 	@echo "Installation successful. To learn more, try \"minio --help\"."
 
 clean: ## cleanup all generated assets
```
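The net effect of the Makefile rework: functional targets now depend on `install-race` (which builds with `-race` and the `kqueue,dev` tags, and pulls in the debugging tools via `build-debugging`) instead of each target rebuilding the binary itself. A typical local sequence under the new layout:

```sh
# Sketch: the reworked targets in their intended order.
make install-race     # race-enabled binary + debugging tools, installed to $GOPATH/bin
make test-pbac        # new bucket-policy suite
make test-multipart   # new multipart-quorum suite
make verify-healing   # now reuses the install-race build instead of rebuilding
```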
README hunks (file header not preserved; the content self-identifies as the MinIO README). The removed table row and the removed `MINIO_SERVER_URL` paragraph are inferred from the hunk counts; the empty table cells at the end held screenshots that did not survive extraction:

```diff
@@ -93,7 +93,6 @@ The following table lists supported architectures. Replace the `wget` URL with t
 | 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
 | 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
 | 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
-| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
 
@@ -210,10 +209,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h
 
 For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
 
-Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.
-
-For example: `export MINIO_SERVER_URL="https://minio.example.net"`
-
 | Dashboard | Creating a bucket |
 | ------------- | ------------- |
 |  |  |
```
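The surviving guidance is the `MINIO_BROWSER_REDIRECT_URL` example (the `MINIO_SERVER_URL` paragraph is what this hunk removes). As a shell export, using the hostname from the README's own example:

```sh
# From the README's reverse-proxy example; the hostname is illustrative.
export MINIO_BROWSER_REDIRECT_URL="https://console.minio.example.net"
```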
`buildscripts/checkdeps.sh`: no content changes; the file mode changed from a normal file to an executable.
The hunks that follow carry no file header in this extract; from the aliases (`minioadm`, `sitea`, `siteb`) they belong to a multi-site replication test script:

```diff
@@ -32,6 +32,7 @@ fi
 set +e
 
 export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
+./mc ready minioadm
 
 ./mc ls minioadm/
 
@@ -56,7 +57,7 @@ done
 
 set +e
 
-sleep 10
+./mc ready minioadm/
 
 ./mc ls minioadm/
 if [ $? -ne 0 ]; then
@@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
 minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
 	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
 
-sleep 20s
-
 export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
 export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004
 
+./mc ready sitea
+./mc ready siteb
+
 ./mc admin replicate add sitea siteb
 
 ./mc admin user add sitea foobar foo12345
@@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
 minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
 	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
 
-sleep 20s
-
 export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
 export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004
 
+./mc ready sitea
+./mc ready siteb
+
 ./mc admin user add sitea foobar-admin foo12345
 
 sleep 2s
```
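The pattern across these script changes is uniform: fixed `sleep`s are replaced by `mc ready <alias>`, which polls the deployment until it reports ready and so removes both flakiness and dead wait time. The idiom in isolation, taken straight from the hunk above:

```sh
# Block until the aliased deployment responds as ready, then proceed.
export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ready minioadm
./mc ls minioadm/
```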
Hunks from the IAM LDAP upgrade-import test script (file header not preserved):

```diff
@@ -56,6 +56,7 @@ create_iam_content_in_old_minio() {
 
 	set -x
 	mc alias set old-minio http://localhost:9000 minioadmin minioadmin
+	mc ready old-minio
 	mc idp ldap add old-minio \
 		server_addr=localhost:1389 \
 		server_insecure=on \
@@ -87,6 +88,7 @@ import_iam_content_in_new_minio() {
 	set -x
 	mc alias set new-minio http://localhost:9000 minioadmin minioadmin
 	echo "BEFORE IMPORT mappings:"
+	mc ready new-minio
 	mc idp ldap policy entities new-minio
 	mc admin cluster iam import new-minio ./old-minio-iam-info.zip
 	echo "AFTER IMPORT mappings:"
```
`buildscripts/minio-upgrade.sh` (22 changed lines; mode changed to executable)

```diff
@@ -4,10 +4,22 @@ trap 'cleanup $LINENO' ERR
 
 # shellcheck disable=SC2120
 cleanup() {
-	MINIO_VERSION=dev docker-compose \
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
 		-f "buildscripts/upgrade-tests/compose.yml" \
-		rm -s -f
+		down || true
+
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
+		-f "buildscripts/upgrade-tests/compose.yml" \
+		rm || true
+
 	for volume in $(docker volume ls -q | grep upgrade); do
 		docker volume rm ${volume} || true
 	done
 
-	docker volume prune -f
+	docker system prune -f || true
+	docker volume prune -f || true
+	docker volume rm $(docker volume ls -q -f dangling=true) || true
 }
 
 verify_checksum_after_heal() {
@@ -60,6 +72,8 @@ __init__() {
 	go install github.com/docker/compose/v2/cmd@latest
 	mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
 
+	cleanup
+
 	TAG=minio/minio:dev make docker
 
 	MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
@@ -77,11 +91,11 @@ __init__() {
 
 	curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum
 
-	MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
 }
 
 main() {
-	MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
 
 	add_alias
```
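The upgrade script now pins a freshly built docker-compose v2 under `/tmp/gopath/bin` rather than trusting whatever `docker-compose` happens to be on `PATH`. The pinning mechanism is visible in `__init__` above; in isolation (the `GOPATH` placement is an assumption drawn from the paths the script uses):

```sh
# Sketch of the pinning done in __init__.
GOPATH=/tmp/gopath go install github.com/docker/compose/v2/cmd@latest
mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
	-f buildscripts/upgrade-tests/compose.yml up -d --build
```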
126
buildscripts/multipart-quorum-test.sh
Normal file
126
buildscripts/multipart-quorum-test.sh
Normal file
@@ -0,0 +1,126 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
    set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
    echo "minio executable binary not found in current directory"
    exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
    rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
    if [ $# -ne 0 ]; then
        echo "error on line $1"
    fi

    echo "Cleaning up instances of MinIO"
    pkill minio || true
    pkill -9 minio || true
    purge "$WORK_DIR"
    if [ $# -ne 0 ]; then
        exit $#
    fi
}

catch

function start_minio_10drive() {
    start_port=$1

    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
    unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
    export MINIO_CI_CD=1

    mkdir ${WORK_DIR}
    C_PWD=${PWD}
    if [ ! -x "$PWD/mc" ]; then
        MC_BUILD_DIR="mc-$RANDOM"
        if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
            echo "failed to download https://github.com/minio/mc"
            purge "${MC_BUILD_DIR}"
            exit 1
        fi

        (cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

        # remove mc source.
        purge "${MC_BUILD_DIR}"
    fi

    "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...10}" >"${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 5

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    "${PWD}/mc" mb --with-versioning minio/bucket

    export AWS_ACCESS_KEY_ID=minio
    export AWS_SECRET_ACCESS_KEY=minio123
    aws --endpoint-url http://localhost:"$start_port" s3api create-multipart-upload --bucket bucket --key obj-1 >upload-id.json
    uploadId=$(jq -r '.UploadId' upload-id.json)

    truncate -s 5MiB file-5mib
    for i in {1..2}; do
        aws --endpoint-url http://localhost:"$start_port" s3api upload-part \
            --upload-id "$uploadId" --bucket bucket --key obj-1 \
            --part-number "$i" --body ./file-5mib
    done
    for i in {1..6}; do
        find ${WORK_DIR}/disk${i}/.minio.sys/multipart/ -type f -name "part.1" -delete
    done
    cat <<EOF >parts.json
{
  "Parts": [
    {
      "PartNumber": 1,
      "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
    },
    {
      "PartNumber": 2,
      "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
    }
  ]
}
EOF
    err=$(aws --endpoint-url http://localhost:"$start_port" s3api complete-multipart-upload --upload-id "$uploadId" --bucket bucket --key obj-1 --multipart-upload file://./parts.json 2>&1)
    rv=$?
    if [ $rv -eq 0 ]; then
        echo "Failed to receive an error"
        exit 1
    fi
    echo "Received an error during complete-multipart as expected: $err"
}

function main() {
    start_port=$(shuf -i 10000-65000 -n 1)
    start_minio_10drive ${start_port}
}

main "$@"
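A minimal sketch of running the new test from a MinIO source checkout (assumes go, jq and the AWS CLI are installed; the script requires ./minio in the current directory):

go build -o minio .
bash buildscripts/multipart-quorum-test.sh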
@@ -45,7 +45,8 @@ function verify_rewrite() {
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10

"${WORK_DIR}/mc" ready minio/

if ! ps -p ${pid} 1>&2 >/dev/null; then
    echo "server1 log:"
@@ -77,7 +78,8 @@ function verify_rewrite() {
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10

"${WORK_DIR}/mc" ready minio/

if ! ps -p ${pid} 1>&2 >/dev/null; then
    echo "server1 log:"
@@ -87,14 +89,12 @@ function verify_rewrite() {
    exit 1
fi

go install -v github.com/minio/minio/docs/debugging/s3-check-md5@latest

if ! s3-check-md5 \
if ! ./s3-check-md5 \
    -debug \
    -versions \
    -access-key minio \
    -secret-key minio123 \
    -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
    -endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
    echo "server1 log:"
    cat "${WORK_DIR}/server1.log"
    echo "FAILED"
@@ -114,7 +114,7 @@ function verify_rewrite() {
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1

if ! s3-check-md5 \
if ! ./s3-check-md5 \
    -debug \
    -versions \
    -access-key minio \

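The checker is now invoked as ./s3-check-md5 rather than from $PATH; a sketch of producing that local binary from inside the repository (flags as used in the hunks above):

go build -o s3-check-md5 ./docs/debugging/s3-check-md5
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint "http://127.0.0.1:9000/"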
@@ -1,5 +1,3 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: minio/minio:${MINIO_VERSION}

@@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1

export MINIO_CI_CD=1

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
@@ -36,18 +37,21 @@ function start_minio_fs() {
    export MINIO_ROOT_USER=$ACCESS_KEY
    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
    sleep 10

    "${WORK_DIR}/mc" ready verify
}

function start_minio_erasure() {
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
    sleep 15

    "${WORK_DIR}/mc" ready verify
}

function start_minio_erasure_sets() {
    export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
    "${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
    sleep 15

    "${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets() {
@@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
    "${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
    "${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &

    sleep 40
    "${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets_ipv6() {
@@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
    "${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
    "${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

    sleep 40
    "${WORK_DIR}/mc" ready verify_ipv6
}

function start_minio_dist_erasure() {
@@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
        "${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
    done

    sleep 40
    "${WORK_DIR}/mc" ready verify
}

function run_test_fs() {
@@ -222,7 +226,7 @@ function __init__() {
        exit 1
    fi

    (cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
    (cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")

    # remove mc source.
    purge "${MC_BUILD_DIR}"

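The hunks above add mc ready gates after each server start; mc ready polls the alias until the deployment accepts requests, which is more robust than a fixed sleep. A standalone usage sketch (credentials as in this script):

export MC_HOST_verify="http://minio:minio123@127.0.0.1:9000/"
"${WORK_DIR}/mc" ready verify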
@@ -19,7 +19,7 @@ function start_minio_3_node() {
    export MINIO_ERASURE_SET_DRIVE_COUNT=6
    export MINIO_CI_CD=1

    start_port=$2
    start_port=$1
    args=""
    for i in $(seq 1 3); do
        args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
@@ -37,40 +37,48 @@ function start_minio_3_node() {
    pid3=$!
    disown $pid3

    sleep "$1"
    export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"

    timeout 15m /tmp/mc ready myminio || fail

    # Wait for all drives to be online and formatted
    while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
    # Wait for all drives to be healed
    while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done

    # Wait for Status: in MinIO output
    while true; do
        rv=$(check_online)
        if [ "$rv" != "1" ]; then
            # success
            break
        fi

        # Check if we should retry
        retry=$((retry + 1))
        if [ $retry -le 20 ]; then
            sleep 5
            continue
        fi

        # Failure
        fail
    done

    if ! ps -p $pid1 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/dist-minio-server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        echo "minio-server-1 is not running." && fail
    fi

    if ! ps -p $pid2 1>&2 >/dev/null; then
        echo "server2 log:"
        cat "${WORK_DIR}/dist-minio-server2.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        echo "minio-server-2 is not running." && fail
    fi

    if ! ps -p $pid3 1>&2 >/dev/null; then
        echo "server3 log:"
        cat "${WORK_DIR}/dist-minio-server3.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        echo "minio-server-3 is not running." && fail
    fi

    if ! pkill minio; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        fail
    fi

    sleep 1
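The jq pipelines above gate the test on drive health; the same queries can be run by hand to watch a deployment (alias as configured in the hunk):

/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state'
/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing'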
@@ -82,14 +90,24 @@ function start_minio_3_node() {
    fi
}

function fail() {
    for i in $(seq 1 3); do
        echo "server$i log:"
        cat "${WORK_DIR}/dist-minio-server$i.log"
    done
    echo "FAILED"
    purge "$WORK_DIR"
    exit 1
}

function check_online() {
    if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
    if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
        echo "1"
    fi
}

function purge() {
    rm -rf "$1"
    echo rm -rf "$1"
}

function __init__() {
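check_online now greps for 'API:' instead of 'Status:', presumably because newer server builds print an API: line in the startup banner; the probe can be reproduced manually:

grep -q 'API:' "${WORK_DIR}"/dist-minio-*.log && echo "servers are up"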
@@ -99,10 +117,15 @@ function __init__() {

    ## version is purposefully set to '3' for minio to migrate configuration file
    echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

    if [ ! -f /tmp/mc ]; then
        wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
            chmod +x /tmp/mc
    fi
}

function perform_test() {
    start_minio_3_node 120 $2
    start_minio_3_node $2

    echo "Testing Distributed Erasure setup healing of drives"
    echo "Remove the contents of the disks belonging to '${1}' erasure set"
@@ -110,19 +133,7 @@ function perform_test() {
    rm -rf ${WORK_DIR}/${1}/*/

    set -x
    start_minio_3_node 120 $2

    rv=$(check_online)
    if [ "$rv" == "1" ]; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        pkill -9 minio
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi
    start_minio_3_node $2
}

function main() {

@@ -15,6 +15,10 @@ MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
GOPATH=/tmp/gopath

function start_minio_3_node() {
    for i in $(seq 1 3); do
        rm "${WORK_DIR}/dist-minio-server$i.log"
    done

    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MINIO_ERASURE_SET_DRIVE_COUNT=6
@@ -22,7 +26,7 @@ function start_minio_3_node() {

    first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)

    start_port=$2
    start_port=$1
    args=""
    for d in $(seq 1 3 5); do
        args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
@@ -42,42 +46,26 @@ function start_minio_3_node() {
    pid3=$!
    disown $pid3

    sleep "$1"
    export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
    timeout 15m /tmp/mc ready myminio || fail

    [ ${first_time} -eq 0 ] && upload_objects $start_port
    [ ${first_time} -eq 0 ] && upload_objects
    [ ${first_time} -ne 0 ] && sleep 120

    if ! ps -p $pid1 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/dist-minio-server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        echo "minio server 1 is not running" && fail
    fi

    if ! ps -p $pid2 1>&2 >/dev/null; then
        echo "server2 log:"
        cat "${WORK_DIR}/dist-minio-server2.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        echo "minio server 2 is not running" && fail
    fi

    if ! ps -p $pid3 1>&2 >/dev/null; then
        echo "server3 log:"
        cat "${WORK_DIR}/dist-minio-server3.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        echo "minio server 3 is not running" && fail
    fi

    if ! pkill minio; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        fail
    fi

    sleep 1
@@ -90,7 +78,7 @@ function start_minio_3_node() {
}

function check_heal() {
    if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
    if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
        return 1
    fi

@@ -112,6 +100,17 @@ function purge() {
    rm -rf "$1"
}

function fail() {
    for i in $(seq 1 3); do
        echo "server$i log:"
        cat "${WORK_DIR}/dist-minio-server$i.log"
    done
    pkill -9 minio
    echo "FAILED"
    purge "$WORK_DIR"
    exit 1
}

function __init__() {
    echo "Initializing environment"
    mkdir -p "$WORK_DIR"
@@ -127,10 +126,6 @@ function __init__() {
}

function upload_objects() {
    start_port=$1

    /tmp/mc alias set myminio http://127.0.0.1:$((start_port + 1)) minio minio123 --api=s3v4
    /tmp/mc ready myminio
    /tmp/mc mb myminio/testbucket/
    for ((i = 0; i < 20; i++)); do
        echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
@@ -140,7 +135,7 @@ function upload_objects() {
function perform_test() {
    start_port=$2

    start_minio_3_node 120 $start_port
    start_minio_3_node $start_port

    echo "Testing Distributed Erasure setup healing of drives"
    echo "Remove the contents of the disks belonging to '${1}' node"
@@ -148,19 +143,12 @@ function perform_test() {
    rm -rf ${WORK_DIR}/${1}/*/

    set -x
    start_minio_3_node 120 $start_port
    start_minio_3_node $start_port

    check_heal ${1}
    rv=$?
    if [ "$rv" == "1" ]; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        pkill -9 minio
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        fail
    fi
}


@@ -25,7 +25,7 @@ import (
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/policy"
)

// Data types used for returning dummy access control

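The v2-to-v3 import swap above repeats across the Go files in the rest of this diff; a sketch of the corresponding module bump (the exact version is pinned by go.mod, not shown in this diff):

go get github.com/minio/pkg/v3@latest
go mod tidy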
@@ -40,7 +40,7 @@ import (
    "github.com/minio/minio/internal/event"
    "github.com/minio/minio/internal/kms"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/policy"
)

const (
@@ -427,6 +427,21 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
    cfgPath := pathJoin(bi.Name, cfgFile)
    bucket := bi.Name
    switch cfgFile {
    case bucketPolicyConfig:
        config, _, err := globalBucketMetadataSys.GetBucketPolicy(bucket)
        if err != nil {
            if errors.Is(err, BucketPolicyNotFound{Bucket: bucket}) {
                continue
            }
            writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
            return
        }
        configData, err := json.Marshal(config)
        if err != nil {
            writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
            return
        }
        rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
    case bucketNotificationConfig:
        config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
@@ -783,7 +798,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
    }
    switch fileName {
    case bucketNotificationConfig:
        config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
        config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
        if err != nil {
            rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
            continue
@@ -796,11 +811,12 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
        }

        bucketMap[bucket].NotificationConfigXML = configData
        bucketMap[bucket].NotificationConfigUpdatedAt = updatedAt
        rpt.SetStatus(bucket, fileName, nil)
    case bucketPolicyConfig:
        // Error out if Content-Length is beyond allowed size.
        if sz > maxBucketPolicySize {
            rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
            rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyTooLarge.String()))
            continue
        }

@@ -818,7 +834,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *

        // Version in policy must not be empty
        if bucketPolicy.Version == "" {
            rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
            rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyInvalidVersion.String()))
            continue
        }

@@ -837,9 +853,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
            rpt.SetStatus(bucket, fileName, err)
            continue
        }

        rcfg, err := globalBucketObjectLockSys.Get(bucket)
        if err != nil {
            rpt.SetStatus(bucket, fileName, err)
            continue
        }
        // Validate the received bucket policy document
        if err = bucketLifecycle.Validate(); err != nil {
        if err = bucketLifecycle.Validate(rcfg); err != nil {
            rpt.SetStatus(bucket, fileName, err)
            continue
        }
@@ -874,8 +894,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
        }
        kmsKey := encConfig.KeyID()
        if kmsKey != "" {
            kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
            _, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
            _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
                Name:           kmsKey,
                AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
            })
            if err != nil {
                if errors.Is(err, kes.ErrKeyNotFound) {
                    rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)

@@ -27,7 +27,7 @@ import (
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio/internal/auth"
    "github.com/minio/minio/internal/config"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/policy"
)

// validateAdminReq will validate request against and return whether it is allowed.
@@ -216,6 +216,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
        Description:    err.Error(),
        HTTPStatusCode: http.StatusBadRequest,
    }
case errors.Is(err, errTierInvalidConfig):
    apiErr = APIError{
        Code:           "XMinioAdminTierInvalidConfig",
        Description:    err.Error(),
        HTTPStatusCode: http.StatusBadRequest,
    }
default:
    apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}

@@ -37,7 +37,7 @@ import (
    "github.com/minio/minio/internal/config/subnet"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/policy"
)

// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv

@@ -32,8 +32,8 @@ import (
    cfgldap "github.com/minio/minio/internal/config/identity/ldap"
    "github.com/minio/minio/internal/config/identity/openid"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/ldap"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/ldap"
    "github.com/minio/pkg/v3/policy"
)

func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {

@@ -20,6 +20,7 @@ package cmd
import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "strings"
@@ -27,7 +28,8 @@ import (
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio/internal/auth"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/policy"
    xldap "github.com/minio/pkg/v3/ldap"
    "github.com/minio/pkg/v3/policy"
)

// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
@@ -188,7 +190,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
//
// PUT /minio/admin/v3/idp/ldap/add-service-account
func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) {
    ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
    ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, true)
    if APIError.Code != "" {
        writeErrorResponseJSON(ctx, w, APIError, r.URL)
        return
@@ -236,12 +238,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
    targetGroups = requestorGroups

    // Deny if the target user is not LDAP
    foundLDAPDN, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
    foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    if foundLDAPDN == "" {
    if foundResult == nil {
        err := errors.New("Specified user does not exist on LDAP server")
        APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
        writeErrorResponseJSON(ctx, w, APIErr, r.URL)
@@ -264,7 +266,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R

    isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
    opts.claims[ldapUserN] = targetUser // simple username
    targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
    var lookupResult *xldap.DNSearchResult
    lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
    if err != nil {
        // if not found, check if DN
        if strings.Contains(err.Error(), "User DN not found for:") {
@@ -278,7 +281,26 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    targetUser = lookupResult.NormDN
    opts.claims[ldapUser] = targetUser // DN
    opts.claims[ldapActualUser] = lookupResult.ActualDN

    // Check if this user or their groups have a policy applied.
    ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    if len(ldapPolicies) == 0 {
        err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`"))
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
        return
    }

    // Add LDAP attributes that were looked up into the claims.
    for attribKey, attribValue := range lookupResult.Attributes {
        opts.claims[ldapAttribPrefix+attribKey] = attribValue
    }
    }

    newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
@@ -385,15 +407,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
        }
    }

    targetAccount, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
    dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    if targetAccount == "" {
    if dnResult == nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
        return
    }
    targetAccount := dnResult.NormDN

    listType := r.Form.Get("listType")
    if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
@@ -456,3 +479,178 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ

    writeSuccessResponseJSON(w, encryptedData)
}

// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()
    if objectAPI == nil || globalNotificationSys == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    cred, owner, s3Err := validateAdminSignature(ctx, r, "")
    if s3Err != ErrNone {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
        return
    }

    dnList := r.Form["userDNs"]
    isAll := r.Form.Get("all") == "true"
    selfOnly := !isAll && len(dnList) == 0

    if isAll && len(dnList) > 0 {
        // This should be checked on client side, so return generic error
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
        return
    }

    // Empty DN list and not self, list access keys for all users
    if isAll {
        if !globalIAMSys.IsAllowed(policy.Args{
            AccountName:     cred.AccessKey,
            Groups:          cred.Groups,
            Action:          policy.ListUsersAdminAction,
            ConditionValues: getConditionValues(r, "", cred),
            IsOwner:         owner,
            Claims:          cred.Claims,
        }) {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
            return
        }
    } else if len(dnList) == 1 {
        var dn string
        foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0])
        if err == nil {
            dn = foundResult.NormDN
        }
        if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
            selfOnly = true
        }
    }

    if !globalIAMSys.IsAllowed(policy.Args{
        AccountName:     cred.AccessKey,
        Groups:          cred.Groups,
        Action:          policy.ListServiceAccountsAdminAction,
        ConditionValues: getConditionValues(r, "", cred),
        IsOwner:         owner,
        Claims:          cred.Claims,
        DenyOnly:        selfOnly,
    }) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
        return
    }

    if selfOnly && len(dnList) == 0 {
        selfDN := cred.AccessKey
        if cred.ParentUser != "" {
            selfDN = cred.ParentUser
        }
        dnList = append(dnList, selfDN)
    }

    var ldapUserList []string
    if isAll {
        ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx)
        if err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
        for user := range ldapUsers {
            ldapUserList = append(ldapUserList, user)
        }
    } else {
        for _, userDN := range dnList {
            // Validate the userDN
            foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
            if err != nil {
                writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
                return
            }
            if foundResult == nil {
                continue
            }
            ldapUserList = append(ldapUserList, foundResult.NormDN)
        }
    }

    listType := r.Form.Get("listType")
    var listSTSKeys, listServiceAccounts bool
    switch listType {
    case madmin.AccessKeyListUsersOnly:
        listSTSKeys = false
        listServiceAccounts = false
    case madmin.AccessKeyListSTSOnly:
        listSTSKeys = true
        listServiceAccounts = false
    case madmin.AccessKeyListSvcaccOnly:
        listSTSKeys = false
        listServiceAccounts = true
    case madmin.AccessKeyListAll:
        listSTSKeys = true
        listServiceAccounts = true
    default:
        err := errors.New("invalid list type")
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
        return
    }

    accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp)
    for _, internalDN := range ldapUserList {
        externalDN := globalIAMSys.LDAPConfig.DecodeDN(internalDN)
        accessKeys := madmin.ListAccessKeysLDAPResp{}
        if listSTSKeys {
            stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, internalDN)
            if err != nil {
                writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
                return
            }
            for _, sts := range stsKeys {
                accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
                    AccessKey:  sts.AccessKey,
                    Expiration: &sts.Expiration,
                })
            }
            // if only STS keys, skip if user has no STS keys
            if !listServiceAccounts && len(stsKeys) == 0 {
                continue
            }
        }

        if listServiceAccounts {
            serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, internalDN)
            if err != nil {
                writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
                return
            }
            for _, svc := range serviceAccounts {
                accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
                    AccessKey:  svc.AccessKey,
                    Expiration: &svc.Expiration,
                })
            }
            // if only service accounts, skip if user has no service accounts
            if !listSTSKeys && len(serviceAccounts) == 0 {
                continue
            }
        }
        accessKeyMap[externalDN] = accessKeys
    }

    data, err := json.Marshal(accessKeyMap)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    writeSuccessResponseJSON(w, encryptedData)
}

@@ -27,7 +27,7 @@ import (
    "strings"

    "github.com/minio/mux"
    "github.com/minio/pkg/v2/env"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/env"
    "github.com/minio/pkg/v3/policy"
)

var (
@@ -374,6 +374,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
    globalNotificationSys.StopRebalance(r.Context())
    writeSuccessResponseHeadersOnly(w)
    adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
    globalNotificationSys.LoadRebalanceMeta(ctx, false)
}

func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {

@@ -33,7 +33,7 @@ import (
    "github.com/minio/madmin-go/v3"
    xioutil "github.com/minio/minio/internal/ioutil"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/policy"
    "github.com/minio/pkg/v3/policy"
)

// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add

@@ -32,7 +32,7 @@ import (

    "github.com/minio/madmin-go/v3"
    minio "github.com/minio/minio-go/v7"
    "github.com/minio/pkg/v2/sync/errgroup"
    "github.com/minio/pkg/v3/sync/errgroup"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
@@ -120,9 +120,12 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
        c.Fatalf("Unable to set user: %v", err)
    }

    err = s.adm.SetPolicy(ctx, policy, accessKey, false)
    if err != nil {
        c.Fatalf("Unable to set policy: %v", err)
    userReq := madmin.PolicyAssociationReq{
        Policies: []string{policy},
        User:     accessKey,
    }
    if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
        c.Fatalf("Unable to attach policy: %v", err)
    }

    accessKeys[i] = accessKey

@@ -26,17 +26,20 @@ import (
    "io"
    "net/http"
    "os"
    "slices"
    "sort"
    "strconv"
    "strings"
    "time"
    "unicode/utf8"

    "github.com/klauspost/compress/zip"
    "github.com/minio/madmin-go/v3"
    "github.com/minio/minio/internal/auth"
    "github.com/minio/minio/internal/cachevalue"
    "github.com/minio/minio/internal/config/dns"
    "github.com/minio/mux"
    "github.com/minio/pkg/v2/policy"
    xldap "github.com/minio/pkg/v3/ldap"
    "github.com/minio/pkg/v3/policy"
    "github.com/puzpuzpuz/xsync/v3"
)

@@ -473,6 +476,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
        return
    }

    if !utf8.ValidString(accessKey) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL)
        return
    }

    checkDenyOnly := false
    if accessKey == cred.AccessKey {
        // Check that there is no explicit deny - otherwise it's allowed
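The new utf8.ValidString guard above rejects access keys that are not valid UTF-8; a sketch of the observable behavior (deliberately malformed key; the sitea alias is purely illustrative):

mc admin user add sitea "$(printf '\xff\xfe')" foo12345 || echo "rejected, as expected"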
@@ -629,7 +637,7 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re

// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
    ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
    ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, false)
    if APIError.Code != "" {
        writeErrorResponseJSON(ctx, w, APIError, r.URL)
        return
@@ -700,12 +708,20 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
    // In case of LDAP we need to resolve the targetUser to a DN and
    // query their groups:
    opts.claims[ldapUserN] = targetUser // simple username
    targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
    var lookupResult *xldap.DNSearchResult
    lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    targetUser = lookupResult.NormDN
    opts.claims[ldapUser] = targetUser // username DN
    opts.claims[ldapActualUser] = lookupResult.ActualDN

    // Add LDAP attributes that were looked up into the claims.
    for attribKey, attribValue := range lookupResult.Attributes {
        opts.claims[ldapAttribPrefix+attribKey] = attribValue
    }

    // NOTE: if not using LDAP, then internal IDP or open ID is
    // being used - in the former, group info is enforced when
@@ -815,7 +831,11 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
    }

    condValues := getConditionValues(r, "", cred)
    addExpirationToCondValues(updateReq.NewExpiration, condValues)
    err = addExpirationToCondValues(updateReq.NewExpiration, condValues)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    // Permission checks:
    //
@@ -1147,6 +1167,172 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
    writeSuccessNoContent(w)
}

// ListAccessKeysBulk - GET /minio/admin/v3/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysBulk(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()
    if objectAPI == nil || globalNotificationSys == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    cred, owner, s3Err := validateAdminSignature(ctx, r, "")
    if s3Err != ErrNone {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
        return
    }

    users := r.Form["users"]
    isAll := r.Form.Get("all") == "true"
    selfOnly := !isAll && len(users) == 0

    if isAll && len(users) > 0 {
        // This should be checked on client side, so return generic error
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
        return
    }

    // Empty user list and not self, list access keys for all users
    if isAll {
        if !globalIAMSys.IsAllowed(policy.Args{
            AccountName:     cred.AccessKey,
            Groups:          cred.Groups,
            Action:          policy.ListUsersAdminAction,
            ConditionValues: getConditionValues(r, "", cred),
            IsOwner:         owner,
            Claims:          cred.Claims,
        }) {
            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
            return
        }
    } else if len(users) == 1 {
        if users[0] == cred.AccessKey || users[0] == cred.ParentUser {
            selfOnly = true
        }
    }

    if !globalIAMSys.IsAllowed(policy.Args{
        AccountName:     cred.AccessKey,
        Groups:          cred.Groups,
        Action:          policy.ListServiceAccountsAdminAction,
        ConditionValues: getConditionValues(r, "", cred),
        IsOwner:         owner,
        Claims:          cred.Claims,
        DenyOnly:        selfOnly,
    }) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
        return
    }

    if selfOnly && len(users) == 0 {
        selfUser := cred.AccessKey
        if cred.ParentUser != "" {
            selfUser = cred.ParentUser
        }
        users = append(users, selfUser)
    }

    var checkedUserList []string
    if isAll {
        users, err := globalIAMSys.ListUsers(ctx)
        if err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
        for user := range users {
            checkedUserList = append(checkedUserList, user)
        }
        checkedUserList = append(checkedUserList, globalActiveCred.AccessKey)
    } else {
        for _, user := range users {
            // Validate the user
            _, ok := globalIAMSys.GetUser(ctx, user)
            if !ok {
                continue
            }
            checkedUserList = append(checkedUserList, user)
        }
    }

    listType := r.Form.Get("listType")
    var listSTSKeys, listServiceAccounts bool
    switch listType {
    case madmin.AccessKeyListUsersOnly:
        listSTSKeys = false
        listServiceAccounts = false
    case madmin.AccessKeyListSTSOnly:
        listSTSKeys = true
        listServiceAccounts = false
    case madmin.AccessKeyListSvcaccOnly:
        listSTSKeys = false
        listServiceAccounts = true
    case madmin.AccessKeyListAll:
        listSTSKeys = true
        listServiceAccounts = true
    default:
        err := errors.New("invalid list type")
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
        return
    }

    accessKeyMap := make(map[string]madmin.ListAccessKeysResp)
    for _, user := range checkedUserList {
        accessKeys := madmin.ListAccessKeysResp{}
        if listSTSKeys {
            stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, user)
            if err != nil {
                writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
                return
            }
            for _, sts := range stsKeys {
                accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
                    AccessKey:  sts.AccessKey,
                    Expiration: &sts.Expiration,
                })
            }
            // if only STS keys, skip if user has no STS keys
            if !listServiceAccounts && len(stsKeys) == 0 {
                continue
            }
        }

        if listServiceAccounts {
            serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, user)
            if err != nil {
                writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
                return
            }
            for _, svc := range serviceAccounts {
                accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
                    AccessKey:  svc.AccessKey,
                    Expiration: &svc.Expiration,
                })
            }
            // if only service accounts, skip if user has no service accounts
            if !listSTSKeys && len(serviceAccounts) == 0 {
                continue
            }
        }
        accessKeyMap[user] = accessKeys
    }

    data, err := json.Marshal(accessKeyMap)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    writeSuccessResponseJSON(w, encryptedData)
}

// AccountInfoHandler returns usage, permissions and other bucket metadata for incoming us
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
@@ -1216,18 +1402,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
        return rd, wr
    }

    bucketStorageCache.InitOnce(10*time.Second,
        cachevalue.Opts{ReturnLastGood: true, NoWait: true},
        func() (DataUsageInfo, error) {
            ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
            defer done()

            return loadDataUsageFromBackend(ctx, objectAPI)
        },
    )

    dataUsageInfo, _ := bucketStorageCache.Get()

    // If etcd, dns federation configured list buckets from etcd.
    var err error
    var buckets []BucketInfo
@@ -1268,7 +1442,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

    var buf []byte
    switch {
    case accountName == globalActiveCred.AccessKey:
    case accountName == globalActiveCred.AccessKey || newGlobalAuthZPluginFn() != nil:
        // For owner account and when plugin authZ is configured always set
        // effective policy as `consoleAdmin`.
        //
        // In the latter case, we let the UI render everything, but individual
        // actions would fail if not permitted by the external authZ service.
        for _, policy := range policy.DefaultPolicies {
            if policy.Name == "consoleAdmin" {
                effectivePolicy = policy.Definition
@@ -1314,15 +1493,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
    rd, wr := isAllowedAccess(bucket.Name)
    if rd || wr {
        // Fetch the data usage of the current bucket
        var size uint64
        var objectsCount uint64
        var objectsHist, versionsHist map[string]uint64
        if !dataUsageInfo.LastUpdate.IsZero() {
            size = dataUsageInfo.BucketsUsage[bucket.Name].Size
            objectsCount = dataUsageInfo.BucketsUsage[bucket.Name].ObjectsCount
            objectsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectSizesHistogram
            versionsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectVersionsHistogram
        }
        bui := globalBucketQuotaSys.GetBucketUsageInfo(ctx, bucket.Name)
        size := bui.Size
        objectsCount := bui.ObjectsCount
        objectsHist := bui.ObjectSizesHistogram
        versionsHist := bui.ObjectVersionsHistogram

        // Fetch the prefix usage of the current bucket
        var prefixUsage map[string]uint64
        if enablePrefixUsage {
@@ -1636,22 +1812,22 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
    // form of the entityName (which will be an LDAP DN).
    var err error
    if isGroup {
        var foundGroupDN string
        var foundGroupDN *xldap.DNSearchResult
        var underBaseDN bool
        if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil {
            iamLogIf(ctx, err)
        } else if foundGroupDN == "" || !underBaseDN {
        } else if foundGroupDN == nil || !underBaseDN {
            err = errNoSuchGroup
        }
        entityName = foundGroupDN
        entityName = foundGroupDN.NormDN
    } else {
        var foundUserDN string
        var foundUserDN *xldap.DNSearchResult
        if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
            iamLogIf(ctx, err)
        } else if foundUserDN == "" {
        } else if foundUserDN == nil {
            err = errNoSuchUser
        }
        entityName = foundUserDN
        entityName = foundUserDN.NormDN
    }
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -2043,6 +2219,16 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {

// ImportIAM - imports all IAM info into MinIO
func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
    a.importIAM(w, r, "")
}

// ImportIAMV2 - imports all IAM info into MinIO
func (a adminAPIHandlers) ImportIAMV2(w http.ResponseWriter, r *http.Request) {
    a.importIAM(w, r, "v2")
}

// ImportIAM - imports all IAM info into MinIO
func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiVer string) {
    ctx := r.Context()

    // Get current object layer instance.
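The importIAM split above lets the new v2 route report per-entity results (Skipped, Removed, Added, Failed) instead of an empty success; the invocation from mc is unchanged (alias and zip as used earlier in this diff; a recent mc is assumed for printing the summary):

mc admin cluster iam import new-minio ./old-minio-iam-info.zip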
@@ -2067,6 +2253,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var skipped, removed, added madmin.IAMEntities
|
||||
var failed madmin.IAMErrEntities
|
||||
|
||||
// import policies first
|
||||
{
|
||||
|
||||
@@ -2092,8 +2282,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
for policyName, policy := range allPolicies {
|
||||
if policy.IsEmpty() {
|
||||
err = globalIAMSys.DeletePolicy(ctx, policyName, true)
|
||||
removed.Policies = append(removed.Policies, policyName)
|
||||
} else {
|
||||
_, err = globalIAMSys.SetPolicy(ctx, policyName, policy)
|
||||
added.Policies = append(added.Policies, policyName)
|
||||
}
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, allPoliciesFile, policyName), r.URL)
|
||||
@@ -2172,8 +2364,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
if _, err = globalIAMSys.CreateUser(ctx, accessKey, ureq); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, toAdminAPIErrCode(ctx, err), err, allUsersFile, accessKey), r.URL)
|
||||
return
|
||||
failed.Users = append(failed.Users, madmin.IAMErrEntity{Name: accessKey, Error: err})
|
||||
} else {
|
||||
added.Users = append(added.Users, accessKey)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2211,8 +2404,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
|
||||
return
|
||||
failed.Groups = append(failed.Groups, madmin.IAMErrEntity{Name: group, Error: err})
|
||||
} else {
|
||||
added.Groups = append(added.Groups, group)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2241,7 +2435,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Validations for LDAP enabled deployments.
|
||||
if globalIAMSys.LDAPConfig.Enabled() {
|
||||
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
|
||||
skippedAccessKeys, err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
|
||||
skipped.ServiceAccounts = append(skipped.ServiceAccounts, skippedAccessKeys...)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
|
||||
return
|
||||
@@ -2249,6 +2444,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
for user, svcAcctReq := range serviceAcctReqs {
|
||||
if slices.Contains(skipped.ServiceAccounts, user) {
|
||||
continue
|
||||
}
|
||||
var sp *policy.Policy
|
||||
var err error
|
||||
if len(svcAcctReq.SessionPolicy) > 0 {
|
||||
@@ -2289,7 +2487,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
// clean import.
|
||||
err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
|
||||
if err != nil {
|
||||
delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
|
||||
delErr := fmt.Errorf("failed to delete existing service account (%s) before importing it: %w", svcAcctReq.AccessKey, err)
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -2306,10 +2504,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
|
||||
return
|
||||
failed.ServiceAccounts = append(failed.ServiceAccounts, madmin.IAMErrEntity{Name: user, Error: err})
|
||||
} else {
|
||||
added.ServiceAccounts = append(added.ServiceAccounts, user)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2346,8 +2544,15 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, regUser, false); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, userPolicyMappingsFile, u), r.URL)
|
||||
return
|
||||
failed.UserPolicies = append(
|
||||
failed.UserPolicies,
|
||||
madmin.IAMErrPolicyEntity{
|
||||
Name: u,
|
||||
Policies: strings.Split(pm.Policies, ","),
|
||||
Error: err,
|
||||
})
|
||||
} else {
|
||||
added.UserPolicies = append(added.UserPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2377,7 +2582,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
// Validations for LDAP enabled deployments.
|
||||
if globalIAMSys.LDAPConfig.Enabled() {
|
||||
isGroup := true
|
||||
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
skipped.Groups = append(skipped.Groups, skippedDN...)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
@@ -2385,9 +2591,19 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}

for g, pm := range grpPolicyMap {
if slices.Contains(skipped.Groups, g) {
continue
}
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
return
failed.GroupPolicies = append(
failed.GroupPolicies,
madmin.IAMErrPolicyEntity{
Name: g,
Policies: strings.Split(pm.Policies, ","),
Error: err,
})
} else {
added.GroupPolicies = append(added.GroupPolicies, map[string][]string{g: strings.Split(pm.Policies, ",")})
}
}
}
@@ -2417,13 +2633,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
skipped.Users = append(skipped.Users, skippedDN...)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
if slices.Contains(skipped.Users, u) {
continue
}
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)
if err != nil && err != errNoSuchUser {
@@ -2436,22 +2656,51 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}

if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
return
failed.STSPolicies = append(
failed.STSPolicies,
madmin.IAMErrPolicyEntity{
Name: u,
Policies: strings.Split(pm.Policies, ","),
Error: err,
})
} else {
added.STSPolicies = append(added.STSPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")})
}
}
}
}
}

func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) {
if exp == nil {
return
if apiVer == "v2" {
iamr := madmin.ImportIAMResult{
Skipped: skipped,
Removed: removed,
Added: added,
Failed: failed,
}

b, err := json.Marshal(iamr)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, b)
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(exp.Sub(time.Now()).Seconds()), 10)}
}

func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
dur := exp.Sub(time.Now())
if dur <= 0 {
return errors.New("unsupported expiration time")
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
return nil
}
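
The rewritten helper above now returns an error instead of silently emitting a negative `DurationSeconds` for an expiration that has already passed. A minimal, self-contained sketch of the expected behavior (the `timeSentinel` value is an assumption standing in for the codebase's zero-time marker, and `time.Until(*exp)` is equivalent to the patch's `exp.Sub(time.Now())`):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
	"time"
)

// Assumed stand-in for the codebase's zero-time marker.
var timeSentinel = time.Unix(0, 0).UTC()

func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
	if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
		return nil // no expiration requested, nothing to add
	}
	dur := time.Until(*exp)
	if dur <= 0 {
		return errors.New("unsupported expiration time") // already in the past
	}
	condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
	return nil
}

func main() {
	cond := map[string][]string{}
	past := time.Now().Add(-time.Hour)
	fmt.Println(addExpirationToCondValues(&past, cond)) // unsupported expiration time

	future := time.Now().Add(time.Hour)
	fmt.Println(addExpirationToCondValues(&future, cond), cond["DurationSeconds"])
}
```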

func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
ctx := r.Context()

// Get current object layer instance.
@@ -2476,6 +2725,12 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}

if createReq.Expiration != nil && !createReq.Expiration.IsZero() {
// truncate expiration at the second.
truncateTime := createReq.Expiration.Truncate(time.Second)
createReq.Expiration = &truncateTime
}

// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)
@@ -2507,7 +2762,18 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
}

condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(createReq.Expiration, condValues)
err = addExpirationToCondValues(createReq.Expiration, condValues)
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}

denyOnly := (targetUser == cred.AccessKey || targetUser == cred.ParentUser)
if ldap && !denyOnly {
res, _ := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if res.NormDN == cred.ParentUser {
denyOnly = true
}
}
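
For LDAP deployments the hunk above extends the self-service case: if the requested target, once resolved to a validated DN, is the caller's own parent user, the permission check flips to deny-only (no explicit allow required, only the absence of an explicit deny). A hedged sketch of that decision, with `lookupDN` as a hypothetical stand-in for `LDAPConfig.GetValidatedDNForUsername`:

```go
package example

// isDenyOnly sketches the decision added above; sketch only, not the
// handler's actual code.
func isDenyOnly(targetUser, accessKey, parentUser string, ldap bool, lookupDN func(string) string) bool {
	denyOnly := targetUser == accessKey || targetUser == parentUser
	if ldap && !denyOnly {
		// The target may arrive as a short username while the caller's
		// parent user is stored as a normalized DN, so compare after lookup.
		if lookupDN(targetUser) == parentUser {
			denyOnly = true
		}
	}
	return denyOnly
}
```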

// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self
@@ -2518,7 +2784,7 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
ConditionValues: condValues,
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: (targetUser == cred.AccessKey || targetUser == cred.ParentUser),
DenyOnly: denyOnly,
}) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAccessDenied)
}

@@ -28,6 +28,7 @@ import (
"net/http"
"net/url"
"runtime"
"slices"
"strings"
"testing"
"time"
@@ -39,7 +40,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (
@@ -239,9 +240,12 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
c.Assert(v.Status, madmin.AccountEnabled)

// 3. Associate policy and check that user can access
err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
User: accessKey,
})
if err != nil {
c.Fatalf("unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
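
Throughout these tests the deprecated `SetPolicy` call is replaced with the explicit attach API, a migration that repeats in every hunk below. A sketch of the new call shape, assuming a configured `*madmin.AdminClient`; note that unlike `SetPolicy`, attaching is additive, which is why a later hunk has to detach `policy1` before attaching `policy2`:

```go
package example

import (
	"context"

	"github.com/minio/madmin-go/v3"
)

// attachReadWrite mirrors the call the tests now make; error handling is
// left to the caller.
func attachReadWrite(ctx context.Context, adm *madmin.AdminClient, accessKey string) error {
	// PolicyAssociationReq takes either User or Group, plus one or more policy names.
	_, err := adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{"readwrite"},
		User:     accessKey,
	})
	return err
}
```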

client := s.getUserClient(c, accessKey, secretKey, "")
@@ -334,23 +338,34 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 2.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -436,7 +451,7 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::testbucket/*"
"arn:aws:s3:::testbucket"
]
}
]
@@ -470,9 +485,12 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
c.mustNotListObjects(ctx, uClient, "testbucket")

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy1},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

admClnt := s.getAdminClient(c, accessKey, secretKey, "")
@@ -490,10 +508,22 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
c.Fatalf("policy was missing!")
}

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
// Detach policy1 to set up for policy2
_, err = s.adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy1},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to detach policy: %v", err)
}

// 3.2 associate policy to user
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy2},
User: accessKey,
})
if err != nil {
c.Fatalf("unable to attach policy: %v", err)
}

// 3.3 check user can create service account implicitly.
@@ -538,16 +568,24 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -571,9 +609,12 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
c.mustNotListObjects(ctx, uClient, bucket)

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 3.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -645,16 +686,24 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))

// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -690,16 +739,24 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -726,9 +783,12 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
c.mustNotListObjects(ctx, uClient, bucket)

// 3. Associate policy to group and check user got access.
err = s.adm.SetPolicy(ctx, policy, group, true)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: group,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 3.1 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -743,8 +803,9 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
if err != nil {
c.Fatalf("group list err: %v", err)
}
if !set.CreateStringSet(groups...).Contains(group) {
c.Fatalf("created group not present!")
expected := []string{group}
if !slices.Equal(groups, expected) {
c.Fatalf("expected group listing: %v, got: %v", expected, groups)
}
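
The group-listing assertion above tightens from set membership to an exact slice comparison. A quick illustration of what the stricter check catches, with hypothetical values:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	got := []string{"mygroup", "stale-group"} // hypothetical listing with a leftover entry
	fmt.Println(slices.Contains(got, "mygroup"))        // true: the old membership check passes
	fmt.Println(slices.Equal(got, []string{"mygroup"})) // false: the exact-match check fails
}
```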
groupInfo, err := s.adm.GetGroupDescription(ctx, group)
if err != nil {
@@ -850,16 +911,24 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -871,9 +940,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

// Create an madmin client with user creds
@@ -931,16 +1003,24 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -952,9 +1032,12 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

// Create an madmin client with user creds
@@ -1010,16 +1093,24 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -1031,9 +1122,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

// 1. Create a service account for the user
@@ -1564,7 +1658,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
"arn:aws:s3:::%s"
]
}
]

@@ -59,9 +59,9 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
"github.com/zeebo/xxh3"
)
@@ -93,11 +93,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

if globalInplaceUpdateDisabled || currentReleaseTime.IsZero() {
if globalInplaceUpdateDisabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if currentReleaseTime.IsZero() || currentReleaseTime.Equal(timeSentinel) {
apiErr := errorCodes.ToAPIErr(ErrMethodNotAllowed)
apiErr.Description = fmt.Sprintf("unable to perform in-place update, release time is unrecognized: %s", currentReleaseTime)
writeErrorResponseJSON(ctx, w, apiErr, r.URL)
return
}

vars := mux.Vars(r)
updateURL := vars["updateURL"]
dryRun := r.Form.Get("dry-run") == "true"
@@ -110,6 +117,11 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
}
}

local := globalLocalNodeName
if local == "" {
local = "127.0.0.1"
}

u, err := url.Parse(updateURL)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -128,6 +140,39 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

updateStatus := madmin.ServerUpdateStatusV2{
DryRun: dryRun,
Results: make([]madmin.ServerPeerUpdateStatus, 0, len(globalNotificationSys.allPeerClients)),
}
peerResults := make(map[string]madmin.ServerPeerUpdateStatus, len(globalNotificationSys.allPeerClients))
failedClients := make(map[int]bool, len(globalNotificationSys.allPeerClients))

if lrTime.Sub(currentReleaseTime) <= 0 {
updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{
Host: local,
Err: fmt.Sprintf("server is running the latest version: %s", Version),
CurrentVersion: Version,
})

for _, client := range globalNotificationSys.peerClients {
updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{
Host: client.String(),
Err: fmt.Sprintf("server is running the latest version: %s", Version),
CurrentVersion: Version,
})
}

// Marshal API response
jsonBytes, err := json.Marshal(updateStatus)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, jsonBytes)
return
}
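
The handler now compares release times before downloading anything: if the target release is not strictly newer, every node reports "already latest" and the request returns early. A sketch of the comparison; the layout constant is an assumption mirroring `MinioReleaseTagTimeLayout`:

```go
package main

import (
	"fmt"
	"time"
)

// Assumed layout for MinIO release tags, e.g. RELEASE.2024-04-01T10-00-00Z.
const releaseTagTimeLayout = "2006-01-02T15-04-05Z"

func main() {
	current, _ := time.Parse(releaseTagTimeLayout, "2024-04-01T10-00-00Z")
	latest, _ := time.Parse(releaseTagTimeLayout, "2024-03-01T09-00-00Z")
	// Same test as lrTime.Sub(currentReleaseTime) <= 0 in the handler above.
	if !latest.After(current) {
		fmt.Println("server is running the latest version")
	}
}
```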

u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
@@ -137,16 +182,6 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

updateStatus := madmin.ServerUpdateStatusV2{DryRun: dryRun}
peerResults := make(map[string]madmin.ServerPeerUpdateStatus)

local := globalLocalNodeName
if local == "" {
local = "127.0.0.1"
}

failedClients := make(map[int]struct{})

if globalIsDistErasure {
// Push binary to other servers
for idx, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, binC) {
@@ -156,7 +191,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
Err: nerr.Err.Error(),
CurrentVersion: Version,
}
failedClients[idx] = struct{}{}
failedClients[idx] = true
} else {
peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{
Host: nerr.Host.String(),
@@ -167,25 +202,17 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
}
}

if lrTime.Sub(currentReleaseTime) > 0 {
if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: err.Error(),
CurrentVersion: Version,
}
} else {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
CurrentVersion: Version,
UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
}
if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: err.Error(),
CurrentVersion: Version,
}
} else {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: fmt.Sprintf("server is already running the latest version: %s", Version),
CurrentVersion: Version,
UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
}
}

@@ -193,8 +220,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
if globalIsDistErasure {
ng := WithNPeers(len(globalNotificationSys.peerClients))
for idx, client := range globalNotificationSys.peerClients {
_, ok := failedClients[idx]
if ok {
if failedClients[idx] {
continue
}
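
The bookkeeping map switches from `map[int]struct{}` to `map[int]bool`, which is what lets the two-value lookup above collapse into a plain index expression: an absent key simply reads as `false`.

```go
package main

import "fmt"

func main() {
	failed := map[int]bool{2: true}
	fmt.Println(failed[2]) // true: peer 2 failed verification
	fmt.Println(failed[5]) // false: missing keys read as the zero value
}
```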
client := client
@@ -237,17 +263,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R

if globalIsDistErasure {
// Notify all other MinIO peers signal service.
startTime := time.Now().Add(restartUpdateDelay)
ng := WithNPeers(len(globalNotificationSys.peerClients))
for idx, client := range globalNotificationSys.peerClients {
_, ok := failedClients[idx]
if ok {
if failedClients[idx] {
continue
}
client := client
ng.Go(ctx, func() error {
prs, ok := peerResults[client.String()]
if ok && prs.CurrentVersion != prs.UpdatedVersion && prs.UpdatedVersion != "" {
return client.SignalService(serviceRestart, "", dryRun)
// We restart only on success, not for any failures.
if ok && prs.Err == "" {
return client.SignalService(serviceRestart, "", dryRun, &startTime)
}
return nil
}, idx, *client.host)
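
Peers are now handed an absolute start time (`time.Now().Add(restartUpdateDelay)`), and only peers that reported no error get the restart signal. Sharing one wall-clock instant makes the restarts land together instead of rolling as each signal arrives. A runnable sketch of that idea; the delay constant is hypothetical:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Hypothetical value standing in for restartUpdateDelay in the diff.
const restartUpdateDelay = 2 * time.Second

func main() {
	startTime := time.Now().Add(restartUpdateDelay)
	var wg sync.WaitGroup
	for peer := 0; peer < 3; peer++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			time.Sleep(time.Until(startTime)) // every peer waits for the same instant
			fmt.Println("peer", id, "restarting")
		}(peer)
	}
	wg.Wait()
}
```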
@@ -283,7 +310,9 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
writeSuccessResponseJSON(w, jsonBytes)

if !dryRun {
if lrTime.Sub(currentReleaseTime) > 0 {
prs, ok := peerResults[local]
// We restart only on success, not for any failures.
if ok && prs.Err == "" {
globalServiceSignalCh <- serviceRestart
}
}
@@ -542,9 +571,12 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
}

var objectAPI ObjectLayer
var execAt *time.Time
switch serviceSig {
case serviceRestart:
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceRestartAdminAction)
t := time.Now().Add(restartUpdateDelay)
execAt = &t
case serviceStop:
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceStopAdminAction)
case serviceFreeze, serviceUnFreeze:
@@ -571,7 +603,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
}

if globalIsDistErasure {
for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun) {
for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun, execAt) {
if nerr.Err != nil && process {
waitingDrives := map[string]madmin.DiskMetrics{}
jerr := json.Unmarshal([]byte(nerr.Err.Error()), &waitingDrives)
@@ -844,9 +876,10 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re
}

func lriToLockEntry(l lockRequesterInfo, now time.Time, resource, server string) *madmin.LockEntry {
t := time.Unix(0, l.Timestamp)
entry := &madmin.LockEntry{
Timestamp: l.Timestamp,
Elapsed: now.Sub(l.Timestamp),
Timestamp: t,
Elapsed: now.Sub(t),
Resource: resource,
ServerList: []string{server},
Source: l.Source,
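
`madmin.LockEntry.Timestamp` now carries a `time.Time` instead of raw Unix nanoseconds, so the hunk converts once at the top of the function and derives both fields from it. The conversion in miniature:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var nanos int64 = 1_700_000_000_123_456_789 // as recorded by the lock requester
	t := time.Unix(0, nanos)                    // same conversion as the diff above
	fmt.Println("acquired:", t.UTC(), "elapsed:", time.Since(t) > 0)
}
```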
@@ -1031,7 +1064,7 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
// Start profiling on remote servers.
var hostErrs []NotificationPeerErr
for _, profiler := range profiles {
hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...)
hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(ctx, profiler)...)

// Start profiling locally as well.
prof, err := startProfiler(profiler)
@@ -1112,7 +1145,11 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)

// Start profiling on remote servers.
for _, profiler := range profiles {
globalNotificationSys.StartProfiling(profiler)
// Limit start time to max 10s.
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
globalNotificationSys.StartProfiling(ctx, profiler)
// StartProfiling blocks, so we can cancel now.
cancel()

// Start profiling locally as well.
prof, err := startProfiler(profiler)
@@ -1127,6 +1164,10 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
for {
select {
case <-ctx.Done():
// Stop remote profiles
go globalNotificationSys.DownloadProfilingData(GlobalContext, io.Discard)

// Stop local
globalProfilerMu.Lock()
defer globalProfilerMu.Unlock()
for k, v := range globalProfiler {
@@ -1431,7 +1472,7 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi

if globalIsDistErasure {
// Get heal status from other peers
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus(ctx)
var errCount int
for _, nerr := range nerrs {
if nerr.Err != nil {
@@ -2173,14 +2214,16 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
return
}

if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{
Name: r.Form.Get("key-id"),
}); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}

// KMSKeyStatusHandler - GET /minio/admin/v3/kms/status
// KMSStatusHandler - GET /minio/admin/v3/kms/status
func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -2194,22 +2237,12 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques
return
}

stat, err := GlobalKMS.Stat(ctx)
stat, err := GlobalKMS.Status(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}

status := madmin.KMSStatus{
Name: stat.Name,
DefaultKeyID: stat.DefaultKey,
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
}
for _, endpoint := range stat.Endpoints {
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
}

resp, err := json.Marshal(status)
resp, err := json.Marshal(stat)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
@@ -2231,15 +2264,9 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
return
}

stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}

keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
keyID = GlobalKMS.DefaultKey
}
response := madmin.KMSKeyStatus{
KeyID: keyID,
@@ -2247,7 +2274,10 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req

kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsContext,
})
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -2260,7 +2290,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}

// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: key.KeyID,
Ciphertext: key.Ciphertext,
AssociatedData: kmsContext,
})
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -2333,6 +2367,7 @@ func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]
}

func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) madmin.InfoMessage {
const operationTimeout = 10 * time.Second
ldap := madmin.LDAP{}
if globalIAMSys.LDAPConfig.Enabled() {
ldapConn, err := globalIAMSys.LDAPConfig.LDAP.Connect()
@@ -2354,7 +2389,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
notifyTarget := fetchLambdaInfo()

local := getLocalServerProperty(globalEndpoints, r, metrics)
servers := globalNotificationSys.ServerInfo(metrics)
servers := globalNotificationSys.ServerInfo(ctx, metrics)
servers = append(servers, local)

var poolsInfo map[int]map[int]madmin.ErasureSetInfo
@@ -2373,7 +2408,9 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
mode = madmin.ItemOnline

// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
dataUsageInfo, err := loadDataUsageFromBackend(ctx2, objectAPI)
cancel()
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
@@ -2407,25 +2444,30 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
}

if pools {
poolsInfo, _ = getPoolsInfo(ctx, allDisks)
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
poolsInfo, _ = getPoolsInfo(ctx2, allDisks)
cancel()
}
}

domain := globalDomainNames
services := madmin.Services{
KMS: fetchKMSStatus(),
KMSStatus: fetchKMSStatusV2(ctx),
LDAP: ldap,
Logger: log,
Audit: audit,
Notifications: notifyTarget,
}
{
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
services.KMSStatus = fetchKMSStatus(ctx2)
cancel()
}
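
`getServerInfo` now bounds each potentially slow backend call with its own ten-second context, and the anonymous block keeps `ctx2` and `cancel` from leaking into the rest of the function; the profiling and data-usage hunks above use the same pattern. A compact, runnable illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const operationTimeout = 10 * time.Second

// slowCall stands in for a backend lookup that honors context cancellation.
func slowCall(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx := context.Background()
	{
		// Block-scoped timeout: ctx2 and cancel cannot be reused by accident below.
		ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
		err := slowCall(ctx2)
		cancel()
		fmt.Println("call finished:", err)
	}
}
```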

return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalSite.Region,
SQSARN: globalEventNotifier.GetARNList(false),
Region: globalSite.Region(),
SQSARN: globalEventNotifier.GetARNList(),
DeploymentID: globalDeploymentID(),
Buckets: buckets,
Objects: objects,
@@ -3024,66 +3066,25 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
return notify
}

// fetchKMSStatus fetches KMS-related status information.
func fetchKMSStatus() madmin.KMS {
kmsStat := madmin.KMS{}
if GlobalKMS == nil {
kmsStat.Status = "disabled"
return kmsStat
}

stat, err := GlobalKMS.Stat(context.Background())
if err != nil {
kmsStat.Status = string(madmin.ItemOffline)
return kmsStat
}
if len(stat.Endpoints) == 0 {
kmsStat.Status = stat.Name
return kmsStat
}
kmsStat.Status = string(madmin.ItemOnline)

kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "success"
}

// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
return kmsStat
}

// fetchKMSStatusV2 fetches KMS-related status information for all instances
func fetchKMSStatusV2(ctx context.Context) []madmin.KMS {
// fetchKMSStatus fetches KMS-related status information for all instances
func fetchKMSStatus(ctx context.Context) []madmin.KMS {
if GlobalKMS == nil {
return []madmin.KMS{}
}

results := GlobalKMS.Verify(ctx)

stats := []madmin.KMS{}
for _, result := range results {
stats = append(stats, madmin.KMS{
Status: result.Status,
Endpoint: result.Endpoint,
Encrypt: result.Encrypt,
Decrypt: result.Decrypt,
Version: result.Version,
})
stat, err := GlobalKMS.Status(ctx)
if err != nil {
kmsLogIf(ctx, err, "failed to fetch KMS status information")
return []madmin.KMS{}
}

stats := make([]madmin.KMS, 0, len(stat.Endpoints))
for endpoint, state := range stat.Endpoints {
stats = append(stats, madmin.KMS{
Status: string(state),
Endpoint: endpoint,
})
}
return stats
}
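
The consolidated `fetchKMSStatus` no longer runs encrypt/decrypt probes; it reports whatever per-endpoint state a single `Status()` call returns. A sketch of that enumeration with hypothetical stand-in types (the real ones live in madmin-go and the internal kms package):

```go
package main

import "fmt"

// kmsEntry is a hypothetical stand-in for madmin.KMS.
type kmsEntry struct{ Status, Endpoint string }

func main() {
	endpoints := map[string]string{
		"https://kms-1:7373": "online",
		"https://kms-2:7373": "offline",
	}
	stats := make([]kmsEntry, 0, len(endpoints))
	for ep, state := range endpoints {
		stats = append(stats, kmsEntry{Status: state, Endpoint: ep})
	}
	fmt.Println(stats) // one entry per configured KMS endpoint
}
```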

@@ -3094,7 +3095,7 @@ func targetStatus(ctx context.Context, h logger.Target) madmin.Status {
return madmin.Status{Status: string(madmin.ItemOffline)}
}

// fetchLoggerDetails return log info
// fetchLoggerInfo return log info
func fetchLoggerInfo(ctx context.Context) ([]madmin.Logger, []madmin.Audit) {
var loggerInfo []madmin.Logger
var auditloggerInfo []madmin.Audit

@@ -463,6 +463,7 @@ func TestTopLockEntries(t *testing.T) {
Owner: lri.Owner,
ID: lri.UID,
Quorum: lri.Quorum,
Timestamp: time.Unix(0, lri.Timestamp),
})
}


@@ -63,8 +63,8 @@
)

var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")

errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
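
Swapping `fmt.Errorf` for `errors.New` is the idiomatic cleanup when the message contains no format verbs; `fmt.Errorf` earns its keep only when formatting or wrapping. A small example:

```go
package main

import (
	"errors"
	"fmt"
)

var errHealIdleTimeout = errors.New("healing results were not consumed for too long")

func main() {
	// fmt.Errorf is still the right tool when wrapping with %w.
	wrapped := fmt.Errorf("object heal aborted: %w", errHealIdleTimeout)
	fmt.Println(errors.Is(wrapped, errHealIdleTimeout)) // true
}
```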
@@ -455,8 +455,8 @@ type healSequence struct {
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64

// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64
// Number of total items where healing failed against item type
healFailedItemsMap map[madmin.HealItemType]int64

// The time of the last scan/heal activity
lastHealActivity time.Time
@@ -497,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
healFailedItemsMap: make(map[madmin.HealItemType]int64),
}
}

@@ -543,12 +543,12 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {

// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) getHealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
@@ -556,29 +556,27 @@ func (h *healSequence) getHealFailedItemsMap() map[string]int64 {
return retMap
}

func (h *healSequence) countFailed(res madmin.HealResultItem) {
func (h *healSequence) countFailed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

for _, d := range res.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}

h.healFailedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

func (h *healSequence) countHeals(healType madmin.HealItemType, healed bool) {
func (h *healSequence) countScanned(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

if !healed {
h.scannedItemsMap[healType]++
} else {
h.healedItemsMap[healType]++
}
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

func (h *healSequence) countHealed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

h.healedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

@@ -734,7 +732,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
task.opts.ScanMode = madmin.HealNormalScan
}

h.countHeals(healType, false)
h.countScanned(healType)

if source.noWait {
select {
@@ -763,9 +761,23 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
return nil
}

countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
for _, drive := range drives {
if drive.State == madmin.DriveStateOk {
count++
}
}
return count
}

// task queued, now wait for the response.
select {
case res := <-task.respCh:
if res.err == nil {
h.countHealed(healType)
} else {
h.countFailed(healType)
}
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
@@ -778,6 +790,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if res.err != nil {
res.result.Detail = res.err.Error()
}
if res.result.ParityBlocks > 0 && res.result.DataBlocks > 0 && res.result.DataBlocks > res.result.ParityBlocks {
if got := countOKDrives(res.result.After.Drives); got < res.result.ParityBlocks {
res.result.Detail = fmt.Sprintf("quorum loss - expected %d minimum, got drive states in OK %d", res.result.ParityBlocks, got)
}
}
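
The new check flags a heal result whose "after" drive states would leave fewer healthy drives than parity blocks. A runnable sketch of the counting logic, with a hedged stand-in for `madmin.HealDriveInfo` (the string "ok" mirrors `madmin.DriveStateOk`):

```go
package main

import "fmt"

// healDriveInfo is a stand-in for madmin.HealDriveInfo.
type healDriveInfo struct{ State string }

func countOKDrives(drives []healDriveInfo) (count int) {
	for _, d := range drives {
		if d.State == "ok" {
			count++
		}
	}
	return count
}

func main() {
	after := []healDriveInfo{{State: "ok"}, {State: "offline"}, {State: "ok"}, {State: "corrupt"}}
	parityBlocks := 3
	if got := countOKDrives(after); got < parityBlocks {
		fmt.Printf("quorum loss - expected %d minimum, got drive states in OK %d\n", parityBlocks, got)
	}
}
```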
|
||||
return h.pushHealResultItem(res.result)
|
||||
case <-h.ctx.Done():
|
||||
return nil
|
||||
@@ -789,18 +806,20 @@ func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
|
||||
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
|
||||
}
|
||||
|
||||
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
|
||||
func (h *healSequence) healItems(objAPI ObjectLayer) error {
|
||||
if h.clientToken == bgHealingUUID {
|
||||
// For background heal do nothing.
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := h.healDiskMeta(objAPI); err != nil {
|
||||
return err
|
||||
if h.bucket == "" { // heal internal meta only during a site-wide heal
|
||||
if err := h.healDiskMeta(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Heal buckets and objects
|
||||
return h.healBuckets(objAPI, bucketsOnly)
|
||||
return h.healBuckets(objAPI)
|
||||
}
|
||||
|
||||
// traverseAndHeal - traverses on-disk data and performs healing
|
||||
@@ -811,8 +830,7 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
|
||||
// has to wait until a safe point is reached, such as between scanning
|
||||
// two objects.
|
||||
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
|
||||
bucketsOnly := false // Heals buckets and objects also.
|
||||
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
|
||||
h.traverseAndHealDoneCh <- h.healItems(objAPI)
|
||||
xioutil.SafeClose(h.traverseAndHealDoneCh)
|
||||
}
|
||||
|
||||
@@ -839,14 +857,14 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
|
||||
}
|
||||
|
||||
// healBuckets - check for all buckets heal or just particular bucket.
|
||||
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
|
||||
func (h *healSequence) healBuckets(objAPI ObjectLayer) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// 1. If a bucket was specified, heal only the bucket.
|
||||
if h.bucket != "" {
|
||||
return h.healBucket(objAPI, h.bucket, bucketsOnly)
|
||||
return h.healBucket(objAPI, h.bucket, false)
|
||||
}
|
||||
|
||||
buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
|
||||
@@ -860,7 +878,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
|
||||
})
|
||||
|
||||
for _, bucket := range buckets {
|
||||
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
|
||||
if err = h.healBucket(objAPI, bucket.Name, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,14 +159,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
|
||||
// Info operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag))
|
||||
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceAllFlag))
|
||||
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceHdrsS3HFlag))
|
||||
|
||||
// StorageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag))
|
||||
// DataUsageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag))
|
||||
// Metrics operation
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceAllFlag))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceHdrsS3HFlag))
|
||||
|
||||
if globalIsDistErasure || globalIsErasure {
|
||||
// Heal operations
|
||||
@@ -193,9 +193,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
// Profiling operations - deprecated API
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)).
|
||||
Queries("profilerType", "{profilerType:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceAllFlag, noObjLayerFlag))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceHdrsS3HFlag, noObjLayerFlag))
|
||||
// Profiling operations
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceAllFlag, noObjLayerFlag))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceHdrsS3HFlag, noObjLayerFlag))
|
||||
|
||||
// Config KV operations.
|
||||
if enableConfigOps {
|
||||
@@ -244,6 +244,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
// STS accounts ops
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Access key (service account/STS) operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}")
|
||||
|
||||
// Info policy IAM latest
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
// List policies latest
|
||||
@@ -290,6 +293,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
|
||||
// Import IAM info
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag))
|
||||
|
||||
// IDentity Provider configuration APIs
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
|
||||
@@ -301,8 +305,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
// LDAP specific service accounts ops
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
|
||||
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).
|
||||
Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
|
||||
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk").
|
||||
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}")
|
||||
|
||||
// LDAP IAM operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
|
||||
@@ -340,6 +345,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
|
||||
adminMiddleware(adminAPI.ListBatchJobs))
|
||||
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/status-job").HandlerFunc(
|
||||
adminMiddleware(adminAPI.BatchJobStatus))
|
||||
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
|
||||
adminMiddleware(adminAPI.DescribeBatchJob))
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc(
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
// Copyright (c) 2015-2024 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
@@ -18,7 +18,6 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -31,6 +30,7 @@ import (
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
)
|
||||
|
||||
// getLocalServerProperty - returns madmin.ServerProperties for only the
|
||||
@@ -64,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
|
||||
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
|
||||
network[nodeName] = string(madmin.ItemOnline)
|
||||
} else {
|
||||
network[nodeName] = string(madmin.ItemOffline)
|
||||
// log once the error
|
||||
peersLogOnceIf(context.Background(), err, nodeName)
|
||||
if xnet.IsNetworkOrHostDown(err, false) {
|
||||
network[nodeName] = string(madmin.ItemOffline)
|
||||
} else if xnet.IsNetworkOrHostDown(err, true) {
|
||||
network[nodeName] = "connection attempt timedout"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ type ObjectToDelete struct {
|
||||
ReplicateDecisionStr string `xml:"-"`
|
||||
}
|
||||
|
||||
// createBucketConfiguration container for bucket configuration request from client.
|
||||
// createBucketLocationConfiguration container for bucket configuration request from client.
|
||||
// Used for parsing the location from the request body for Makebucket.
|
||||
type createBucketLocationConfiguration struct {
|
||||
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
||||
@@ -48,7 +48,7 @@ import (
|
||||
levent "github.com/minio/minio/internal/config/lambda/event"
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/hash"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// APIError structure
|
||||
@@ -287,6 +287,7 @@ const (
|
||||
ErrAdminNoSuchGroup
|
||||
ErrAdminGroupNotEmpty
|
||||
ErrAdminGroupDisabled
|
||||
ErrAdminInvalidGroupName
|
||||
ErrAdminNoSuchJob
|
||||
ErrAdminNoSuchPolicy
|
||||
ErrAdminPolicyChangeAlreadyApplied
|
||||
@@ -425,6 +426,7 @@ const (
|
||||
ErrAdminProfilerNotEnabled
|
||||
ErrInvalidDecompressedSize
|
||||
ErrAddUserInvalidArgument
|
||||
ErrAddUserValidUTF
|
||||
ErrAdminResourceInvalidArgument
|
||||
ErrAdminAccountNotEligible
|
||||
ErrAccountNotEligible
|
||||
@@ -443,6 +445,8 @@ const (
|
||||
ErrAdminNoAccessKey
|
||||
ErrAdminNoSecretKey
|
||||
|
||||
ErrIAMNotInitialized
|
||||
|
||||
apiErrCodeEnd // This is used only for the testing code
|
||||
)
|
||||
|
||||
@@ -456,9 +460,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
|
||||
if err != nil {
|
||||
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
|
||||
}
|
||||
if globalSite.Region != "" {
|
||||
if region := globalSite.Region(); region != "" {
|
||||
if errCode == ErrAuthorizationHeaderMalformed {
|
||||
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
|
||||
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
|
||||
return apiErr
|
||||
}
|
||||
}
|
||||
@@ -965,7 +969,7 @@ var errorCodes = errorCodeMap{
|
||||
ErrReplicationRemoteConnectionError: {
|
||||
Code: "XMinioAdminReplicationRemoteConnectionError",
|
||||
Description: "Remote service connection error",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrReplicationBandwidthLimitError: {
|
||||
Code: "XMinioAdminReplicationBandwidthLimitError",
|
||||
@@ -974,7 +978,7 @@ var errorCodes = errorCodeMap{
|
||||
},
|
||||
ErrReplicationNoExistingObjects: {
|
||||
Code: "XMinioReplicationNoExistingObjects",
|
||||
Description: "No matching ExistingsObjects rule enabled",
|
||||
Description: "No matching ExistingObjects rule enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrRemoteTargetDenyAddError: {
|
||||
@@ -1303,6 +1307,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Server not initialized yet, please try again.",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrIAMNotInitialized: {
|
||||
Code: "XMinioIAMNotInitialized",
|
||||
Description: "IAM sub-system not initialized yet, please try again.",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrBucketMetadataNotInitialized: {
|
||||
Code: "XMinioBucketMetadataNotInitialized",
|
||||
Description: "Bucket metadata not initialized yet, please try again.",
|
||||
@@ -1477,8 +1486,8 @@ var errorCodes = errorCodeMap{
|
||||
},
|
||||
ErrTooManyRequests: {
|
||||
Code: "TooManyRequests",
|
||||
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
Description: "Please reduce your request rate",
|
||||
HTTPStatusCode: http.StatusTooManyRequests,
|
||||
},
|
||||
ErrUnsupportedMetadata: {
|
||||
Code: "InvalidArgument",
|
||||
@@ -2101,6 +2110,16 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Expected LDAP short username but was given full DN.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminInvalidGroupName: {
|
||||
Code: "XMinioInvalidGroupName",
|
||||
Description: "The group name is invalid.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAddUserValidUTF: {
|
||||
Code: "XMinioInvalidUTF",
|
||||
Description: "Invalid UTF-8 character detected.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
}
|
||||
|
||||
// toAPIErrorCode - Converts embedded errors. Convenience
|
||||
@@ -2140,6 +2159,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrAdminNoSuchGroup
|
||||
case errGroupNotEmpty:
|
||||
apiErr = ErrAdminGroupNotEmpty
|
||||
case errGroupNameContainsReservedChars:
|
||||
apiErr = ErrAdminInvalidGroupName
|
||||
case errNoSuchJob:
|
||||
apiErr = ErrAdminNoSuchJob
|
||||
case errNoPolicyToAttachOrDetach:
|
||||
@@ -2154,6 +2175,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrEntityTooSmall
|
||||
case errAuthentication:
|
||||
apiErr = ErrAccessDenied
|
||||
case auth.ErrContainsReservedChars:
|
||||
apiErr = ErrAdminInvalidAccessKey
|
||||
case auth.ErrInvalidAccessKeyLength:
|
||||
apiErr = ErrAdminInvalidAccessKey
|
||||
case auth.ErrInvalidSecretKeyLength:
|
||||
@@ -2430,9 +2453,9 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
switch e := err.(type) {
|
||||
case kms.Error:
|
||||
apiErr = APIError{
|
||||
Description: e.Err.Error(),
|
||||
Code: e.APICode,
|
||||
HTTPStatusCode: e.HTTPStatusCode,
|
||||
Description: e.Err,
|
||||
HTTPStatusCode: e.Code,
|
||||
}
|
||||
case batchReplicationJobError:
|
||||
apiErr = APIError{
|
||||
@@ -2526,11 +2549,11 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
if len(e.Errors) >= 1 {
|
||||
apiErr.Code = e.Errors[0].Reason
|
||||
}
|
||||
case azblob.StorageError:
|
||||
case *azcore.ResponseError:
|
||||
apiErr = APIError{
|
||||
Code: string(e.ServiceCode()),
|
||||
Code: e.ErrorCode,
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: e.Response().StatusCode,
|
||||
HTTPStatusCode: e.StatusCode,
|
||||
}
|
||||
// Add more other SDK related errors here if any in future.
|
||||
default:
|
||||
@@ -2569,7 +2592,7 @@ func getAPIError(code APIErrorCode) APIError {
|
||||
return errorCodes.ToAPIErr(ErrInternalError)
|
||||
}
|
||||
|
||||
// getErrorResponse gets in standard error and resource value and
|
||||
// getAPIErrorResponse gets in standard error and resource value and
|
||||
// provides a encodable populated response values
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)
@@ -2579,7 +2602,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region,
Region: globalSite.Region(),
RequestID: requestID,
HostID: hostID,
ActualObjectSize: err.ObjectSize,

@@ -19,6 +19,7 @@ package cmd

import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -53,7 +54,7 @@ func setCommonHeaders(w http.ResponseWriter) {

// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalSite.Region; region != "" {
if region := globalSite.Region(); region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
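This and many later hunks swap the raw field `globalSite.Region` for an accessor `globalSite.Region()`. A plausible motivation (assumed here, not stated in the diff) is that site configuration can be reloaded at runtime, so reads must go through a lock-guarded getter. A sketch of that accessor shape under those assumptions:

```go
package main

import (
	"fmt"
	"sync"
)

// siteConfig is a hypothetical stand-in: mutable server config guarded by
// a RWMutex so a config reload cannot race with handlers reading the region.
type siteConfig struct {
	mu     sync.RWMutex
	region string
}

func (s *siteConfig) Region() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.region
}

func (s *siteConfig) Update(region string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.region = region
}

func main() {
	var site siteConfig
	site.Update("us-east-1")
	fmt.Println(site.Region())
}
```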
w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -107,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)

@@ -135,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags.Count() > 0 {
if tags != nil && tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is a MinIO-only extension to return tags along with the count.
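The hunk above fixes a latent nil dereference: the error from `tags.ParseObjectTags` is discarded, so on malformed tags the returned pointer can be nil, and the old `tags.Count()` call would panic. The new guard checks the pointer first. The same defensive idiom in isolation (the `tagSet`/`parse` names below are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

type tagSet struct{ n int }

func (t *tagSet) Count() int { return t.n }

// parse returns a nil *tagSet on failure, like any constructor that
// pairs a pointer result with an error.
func parse(s string) (*tagSet, error) {
	if s == "" {
		return nil, errors.New("empty tag string")
	}
	return &tagSet{n: 1}, nil
}

func main() {
	tags, _ := parse("") // error deliberately ignored, as in the diff
	// Without the nil check, tags.Count() would dereference nil and panic.
	if tags != nil && tags.Count() > 0 {
		fmt.Println("tag count:", tags.Count())
	} else {
		fmt.Println("no usable tags")
	}
}
```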
@@ -212,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.IsRemote() {
// Check if object is being restored. For more information on x-amz-restore header see
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
}

if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {

@@ -35,7 +35,7 @@ import (
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
xxml "github.com/minio/xxml"
)

@@ -544,7 +544,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
}

// generates a ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))

owner := &Owner{
@@ -573,7 +573,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -593,8 +593,9 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}

content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
if !object.Expires.IsZero() {
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
}
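All three listing generators now emit the `expires` metadata entry only when the timestamp is actually set; formatting Go's zero `time.Time` would otherwise leak a meaningless year-1 date to clients. The guard in isolation:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	var expires time.Time // zero value: January 1, year 1, UTC

	// Unconditional formatting surfaces a bogus date:
	fmt.Println(expires.Format(http.TimeFormat)) // "Mon, 01 Jan 0001 00:00:00 GMT"

	// The guarded form from the diff only emits real values:
	if !expires.IsZero() {
		fmt.Println("expires:", expires.Format(http.TimeFormat))
	}
}
```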
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
@@ -634,7 +635,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}

// generates a ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
owner := &Owner{
ID: globalMinioDefaultOwnerID,
@@ -654,7 +655,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -683,7 +684,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}

// generates a ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner *Owner
if fetchOwner {
@@ -707,7 +708,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -729,7 +730,9 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
if !object.Expires.IsZero() {
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
}
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
@@ -789,8 +792,8 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
}

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0)
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0, h)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
@@ -946,17 +949,18 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {

// writeErrorResponse writes error headers
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
if err.HTTPStatusCode == http.StatusServiceUnavailable {
// Set retry-after header to indicate user-agents to retry request after 120secs.
switch err.HTTPStatusCode {
case http.StatusServiceUnavailable, http.StatusTooManyRequests:
// Set retry-after header to indicate user-agents to retry request after 60 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
w.Header().Set(xhttp.RetryAfter, "60")
}
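The hunk above broadens the Retry-After hint from 503-only to both 503 and 429, and shortens it from 120 to 60 seconds. The same switch pattern as plain net/http code (the `setRetryAfter` helper is my own name, not MinIO's):

```go
package main

import (
	"fmt"
	"net/http"
)

// setRetryAfter mirrors the switch in the diff: only statuses worth
// retrying get the header, per Retry-After semantics (RFC 9110 / MDN).
func setRetryAfter(w http.ResponseWriter, status int) {
	switch status {
	case http.StatusServiceUnavailable, http.StatusTooManyRequests:
		w.Header().Set("Retry-After", "60") // seconds
	}
	w.WriteHeader(status)
}

func main() {
	http.HandleFunc("/busy", func(w http.ResponseWriter, r *http.Request) {
		setRetryAfter(w, http.StatusTooManyRequests)
		fmt.Fprintln(w, "slow down")
	})
	// http.ListenAndServe(":8080", nil) // uncomment to run a demo server
}
```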

switch err.Code {
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
}

// Similar check to http.checkWriteHeaderCode

@@ -24,7 +24,7 @@ import (
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)

@@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Unlock()
}

// objectAPIHandler implements and provides http handlers for S3 API.
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}
@@ -436,7 +436,7 @@ func registerAPIRouter(router *mux.Router) {
Queries("notification", "")
// ListenNotification
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
Queries("events", "{events:.*}")
// ResetBucketReplicationStatus - MinIO extension API
router.Methods(http.MethodGet).
@@ -456,6 +456,14 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)).
Queries("cors", "")
// PutBucketCors - this is a dummy call.
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketCorsHandler)).
Queries("cors", "")
// DeleteBucketCors - this is a dummy call.
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketCorsHandler)).
Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)).
@@ -472,6 +480,7 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)).
Queries("logging", "")

// GetBucketTaggingHandler
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)).
@@ -615,7 +624,7 @@ func registerAPIRouter(router *mux.Router) {

// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
Queries("events", "{events:.*}")

// ListBuckets

File diff suppressed because one or more lines are too long
@@ -41,7 +41,7 @@ import (
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// Verify if request has JWT.
@@ -178,7 +178,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)

logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()

return cred, owner, ErrNone
}
@@ -222,7 +222,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
return claims
}

func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
@@ -244,7 +244,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},

// If AuthZPlugin is set, return without any further checks.
if newGlobalAuthZPluginFn() != nil {
return claims.Map(), nil
return claims, nil
}

// Check if a session policy is set. If so, decode it here.
@@ -263,12 +263,16 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
}

return claims.Map(), nil
return claims, nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
if err != nil {
return nil, err
}
return jwtClaims.Map(), nil
}
}
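The claims refactor above changes the inner function to return the typed `*xjwt.MapClaims` and pushes the `.Map()` conversion out to the callers that still need a plain map. A generic sketch of that refactor shape (the `MapClaims` wrapper below is a stand-in for the xjwt type, assumed from the diff, not copied from it):

```go
package main

import "fmt"

// MapClaims stands in for xjwt.MapClaims: a typed wrapper that can
// still hand out a plain map where legacy callers require one.
type MapClaims struct {
	MapClaims map[string]interface{}
}

func (c *MapClaims) Map() map[string]interface{} { return c.MapClaims }

// parseClaims is the typed inner function; callers needing only the
// generic map convert at the edge, as getClaimsFromToken now does.
func parseClaims(token string) (*MapClaims, error) {
	return &MapClaims{MapClaims: map[string]interface{}{"sub": token}}, nil
}

func claimsAsMap(token string) (map[string]interface{}, error) {
	c, err := parseClaims(token)
	if err != nil {
		return nil, err
	}
	return c.Map(), nil
}

func main() {
	m, _ := claimsAsMap("alice")
	fmt.Println(m["sub"])
}
```

Keeping the typed value inside the package lets later code (the session-policy handling above, for instance) touch struct fields directly instead of re-asserting types out of a map.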

// Fetch claims in the security token returned by the client and validate the token.
@@ -319,7 +323,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
return claims.Map(), ErrNone
}

claims := xjwt.NewMapClaims()
@@ -368,7 +372,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalSite.Region
region := globalSite.Region()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
@@ -384,7 +388,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act

logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()

// region is valid only for CreateBucketAction.
var region string
@@ -684,7 +688,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalSite.Region
region := globalSite.Region()
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
@@ -745,7 +749,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
region := globalSite.Region
region := globalSite.Region()
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported

@@ -28,7 +28,7 @@ import (
"time"

"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

type nullReader struct{}
@@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {

// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}

@@ -25,7 +25,7 @@ import (
"time"

"github.com/minio/madmin-go/v3"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

// healTask represents what to heal along with options
@@ -133,19 +133,20 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *
}
}

if bgSeq != nil {
// We increment relevant counter based on the heal result for prometheus reporting.
if err != nil {
bgSeq.countFailed(res)
} else {
bgSeq.countHeals(res.Type, false)
}
}

if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
continue
}

// when respCh is not set caller is not waiting but we
// update the relevant metrics for them
if bgSeq != nil {
if err == nil {
bgSeq.countHealed(res.Type)
} else {
bgSeq.countFailed(res.Type)
}
}
case <-ctx.Done():
return
}

@@ -33,7 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (
@@ -72,10 +72,12 @@ type healingTracker struct {

// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeItemsHealed uint64 `json:"-"`
ResumeItemsFailed uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
ResumeItemsHealed uint64 `json:"-"`
ResumeItemsFailed uint64 `json:"-"`
ResumeItemsSkipped uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
ResumeBytesSkipped uint64 `json:"-"`

// Filled on startup/restarts.
QueuedBuckets []string
@@ -88,6 +90,11 @@ type healingTracker struct {

ItemsSkipped uint64
BytesSkipped uint64

RetryAttempts uint64

Finished bool // finished healing, whether with errors or not

// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
@@ -141,14 +148,34 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
return h
}

func (h healingTracker) getLastUpdate() time.Time {
func (h *healingTracker) resetHealing() {
h.mu.Lock()
defer h.mu.Unlock()

h.ItemsHealed = 0
h.ItemsFailed = 0
h.BytesDone = 0
h.BytesFailed = 0
h.ResumeItemsHealed = 0
h.ResumeItemsFailed = 0
h.ResumeBytesDone = 0
h.ResumeBytesFailed = 0
h.ItemsSkipped = 0
h.BytesSkipped = 0

h.HealedBuckets = nil
h.Object = ""
h.Bucket = ""
}

func (h *healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()

return h.LastUpdate
}

func (h healingTracker) getBucket() string {
func (h *healingTracker) getBucket() string {
h.mu.RLock()
defer h.mu.RUnlock()

@@ -162,7 +189,7 @@ func (h *healingTracker) setBucket(bucket string) {
h.Bucket = bucket
}

func (h healingTracker) getObject() string {
func (h *healingTracker) getObject() string {
h.mu.RLock()
defer h.mu.RUnlock()

@@ -196,9 +223,6 @@ func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
}
h.mu.Lock()
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
@@ -260,8 +284,10 @@ func (h *healingTracker) resume() {

h.ItemsHealed = h.ResumeItemsHealed
h.ItemsFailed = h.ResumeItemsFailed
h.ItemsSkipped = h.ResumeItemsSkipped
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
h.BytesSkipped = h.ResumeBytesSkipped
}

// bucketDone should be called when a bucket is done healing.
@@ -272,8 +298,10 @@ func (h *healingTracker) bucketDone(bucket string) {

h.ResumeItemsHealed = h.ItemsHealed
h.ResumeItemsFailed = h.ItemsFailed
h.ResumeItemsSkipped = h.ItemsSkipped
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.ResumeBytesSkipped = h.BytesSkipped
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
@@ -322,6 +350,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
DiskIndex: h.DiskIndex,
Finished: h.Finished,
Path: h.Path,
Started: h.Started.UTC(),
LastUpdate: h.LastUpdate.UTC(),
@@ -337,6 +366,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
Object: h.Object,
QueuedBuckets: h.QueuedBuckets,
HealedBuckets: h.HealedBuckets,
RetryAttempts: h.RetryAttempts,

ObjectsHealed: h.ItemsHealed, // Deprecated July 2021
ObjectsFailed: h.ItemsFailed, // Deprecated July 2021
@@ -351,24 +381,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
}

initBackgroundHealing(ctx, objAPI) // start quick background healing

if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
go monitorLocalDisksAndHeal(ctx, z)
}

go globalMRFState.startMRFPersistence()
go globalMRFState.healRoutine(z)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
globalLocalDrivesMu.RLock()
localDrives := cloneDrives(globalLocalDrives)
localDrives := cloneDrives(globalLocalDrivesMap)
globalLocalDrivesMu.RUnlock()
for _, disk := range localDrives {
_, err := disk.GetDiskID()
_, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, disk.Endpoint())
continue
}
if disk.Healing() != nil {
if h := disk.Healing(); h != nil && !h.Finished {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
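getLocalDisksToHeal now probes each drive with DiskInfo instead of only fetching its ID, and it re-queues a drive whose previous healing tracker exists but never reached the Finished state. The selection logic in isolation (types below are simplified stand-ins for MinIO's storage interfaces):

```go
package main

import (
	"errors"
	"fmt"
)

var errUnformattedDisk = errors.New("unformatted disk")

type healState struct{ Finished bool }

type drive struct {
	name     string
	probeErr error      // result of the DiskInfo-style probe
	healing  *healState // nil when no healing tracker exists
}

func disksToHeal(drives []drive) (out []string) {
	for _, d := range drives {
		// A probe returning errUnformattedDisk means a freshly replaced drive.
		if errors.Is(d.probeErr, errUnformattedDisk) {
			out = append(out, d.name)
			continue
		}
		// A tracker that exists but never finished means healing must resume.
		if h := d.healing; h != nil && !h.Finished {
			out = append(out, d.name)
		}
	}
	return out
}

func main() {
	fmt.Println(disksToHeal([]drive{
		{name: "d1", probeErr: errUnformattedDisk},
		{name: "d2", healing: &healState{Finished: false}},
		{name: "d3", healing: &healState{Finished: true}},
	})) // [d1 d2]
}
```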
@@ -382,6 +414,8 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {

var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")

func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
disk := getStorageViaEndpoint(endpoint)
@@ -389,6 +423,17 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint)
}

_, err := disk.DiskInfo(ctx, DiskInfoOptions{})
if err != nil {
if errors.Is(err, errDriveIsRoot) {
// This is a root drive, ignore and move on
return nil
}
if !errors.Is(err, errUnformattedDisk) {
return err
}
}

// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
@@ -451,12 +496,30 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return err
}

healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
// if objects have failed healing, we attempt a retry to heal the drive up to 3 times before giving up.
if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
tracker.RetryAttempts++

if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)

tracker.resetHealing()
bugLogIf(ctx, tracker.update(ctx))

return errRetryHealing
}

if tracker.ItemsFailed > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
tracker.RetryAttempts, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
} else {
if tracker.RetryAttempts > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped)
} else {
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped)
}
}
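The new flow retries a drive heal whenever items failed: it resets the tracker's counters and returns the sentinel errRetryHealing so the monitor loop re-queues the drive instead of logging a hard failure. A compact sketch of that retry-with-sentinel pattern (the simulated `healOnce` is invented for the demo):

```go
package main

import (
	"errors"
	"fmt"
)

var errRetry = errors.New("retry healing")

type tracker struct {
	Failed        int
	RetryAttempts int
}

// healOnce pretends to be one pass over the drive; the third pass succeeds.
func healOnce(t *tracker) {
	if t.RetryAttempts >= 2 {
		t.Failed = 0
	} else {
		t.Failed = 5
	}
}

func healDrive(t *tracker) error {
	healOnce(t)
	if t.Failed > 0 && t.RetryAttempts < 4 {
		t.RetryAttempts++
		t.Failed = 0 // reset counters before the next pass, like resetHealing()
		return errRetry
	}
	return nil
}

func main() {
	t := &tracker{}
	for {
		if err := healDrive(t); errors.Is(err, errRetry) {
			continue // the monitor loop re-queues instead of reporting failure
		}
		break
	}
	fmt.Println("healed after", t.RetryAttempts, "retries")
}
```

Using a distinct sentinel lets monitorLocalDisksAndHeal (below) suppress the error log for this expected case while still treating genuine failures loudly.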
if serverDebugLog {
tracker.printTo(os.Stdout)
fmt.Printf("\n")
@@ -486,7 +549,8 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
continue
}
if t.HealID == tracker.HealID {
t.delete(ctx)
t.Finished = true
t.update(ctx)
}
}

@@ -528,7 +592,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
if err := healFreshDisk(ctx, z, disk); err != nil {
globalBackgroundHealState.setDiskHealingStatus(disk, false)
timedout := OperationTimedOut{}
if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) {
if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) {
printEndpointError(disk, err, false)
}
return

@@ -132,6 +132,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
case "ResumeItemsSkipped":
z.ResumeItemsSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
@@ -144,6 +150,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "ResumeBytesSkipped":
z.ResumeBytesSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
@@ -200,6 +212,18 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "RetryAttempts":
z.RetryAttempts, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "Finished":
z.Finished, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -213,9 +237,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 25
// map header, size 29
// write "ID"
err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
if err != nil {
return
}
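The generated encoder's map header changes from `0xde, 0x00, 0x19` to `0xde, 0x00, 0x1d` because the struct grew from 25 to 29 serialized fields: in MessagePack, `0xde` is the map16 marker followed by a big-endian uint16 element count. A quick check of those bytes:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// map16 header layout: 0xde, then a big-endian uint16 field count.
	oldHdr := []byte{0xde, 0x00, 0x19}
	newHdr := []byte{0xde, 0x00, 0x1d}
	fmt.Println(binary.BigEndian.Uint16(oldHdr[1:])) // 25
	fmt.Println(binary.BigEndian.Uint16(newHdr[1:])) // 29
}
```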
@@ -394,6 +418,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
// write "ResumeItemsSkipped"
err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeItemsSkipped)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
@@ -414,6 +448,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "ResumeBytesSkipped"
err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesSkipped)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
@@ -478,15 +522,35 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "BytesSkipped")
return
}
// write "RetryAttempts"
err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "Finished"
err = en.Append(0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Finished)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 25
// map header, size 29
// string "ID"
o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -539,12 +603,18 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
// string "ResumeItemsFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsFailed)
// string "ResumeItemsSkipped"
o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsSkipped)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "ResumeBytesSkipped"
o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesSkipped)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
@@ -566,6 +636,12 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
// string "BytesSkipped"
o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesSkipped)
// string "RetryAttempts"
o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
o = msgp.AppendUint64(o, z.RetryAttempts)
// string "Finished"
o = append(o, 0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
o = msgp.AppendBool(o, z.Finished)
return
}

@@ -695,6 +771,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
case "ResumeItemsSkipped":
z.ResumeItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
@@ -707,6 +789,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "ResumeBytesSkipped":
z.ResumeBytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
@@ -763,6 +851,18 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "RetryAttempts":
z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "Finished":
z.Finished, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -777,7 +877,7 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
@@ -785,6 +885,6 @@ func (z *healingTracker) Msgsize() (s int) {
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 9 + msgp.BoolSize
return
}

@@ -33,9 +33,10 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"github.com/minio/pkg/v3/xtime"
"gopkg.in/yaml.v3"
)

@@ -116,7 +117,7 @@ func (p BatchJobExpirePurge) Validate() error {
// BatchJobExpireFilter holds all the filters currently supported for batch replication
type BatchJobExpireFilter struct {
line, col int
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
@@ -162,7 +163,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
return false
}
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() {
return false
}

@@ -280,7 +281,7 @@ type BatchJobExpire struct {
line, col int
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
@@ -340,8 +341,24 @@ func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versio
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
return errs

allErrs := make([]error, 0, len(objsToDel))

for {
count := len(objsToDel)
if count == 0 {
break
}
if count > maxDeleteList {
count = maxDeleteList
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel[:count], opts)
allErrs = append(allErrs, errs...)
// Next batch of deletion
objsToDel = objsToDel[count:]
}

return allErrs
}
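Expire no longer hands the whole slice to DeleteObjects in one call; it walks it in maxDeleteList-sized chunks so each bulk-delete stays within the server's per-request cap. The chunking idiom in isolation (the constant value below is arbitrary for the demo):

```go
package main

import "fmt"

const maxDeleteList = 3 // stand-in for the server's bulk-delete cap

func deleteInBatches(objs []string, del func([]string)) {
	for len(objs) > 0 {
		count := len(objs)
		if count > maxDeleteList {
			count = maxDeleteList
		}
		del(objs[:count])
		objs = objs[count:] // reslice to the remainder, as in the diff
	}
}

func main() {
	deleteInBatches([]string{"a", "b", "c", "d", "e"}, func(batch []string) {
		fmt.Println("delete:", batch)
	}) // delete: [a b c], then delete: [d e]
}
```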

const (
@@ -371,9 +388,12 @@ func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {

func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
vc, _ := globalBucketVersioningSys.Get(r.Bucket)
retryAttempts := r.Retry.Attempts
retryAttempts := job.Expire.Retry.Attempts
if retryAttempts <= 0 {
retryAttempts = batchExpireJobDefaultRetries
}
delay := job.Expire.Retry.Delay
if delay == 0 {
if delay <= 0 {
delay = batchExpireJobDefaultRetryDelay
}

@@ -432,7 +452,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
})
if err != nil {
stopFn(exp, err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts))
} else {
stopFn(exp, err)
success = true
@@ -464,13 +484,13 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
copy(toDelCopy, toDel)
var failed int
errs := r.Expire(ctx, api, vc, toDel)
// reslice toDel in preparation for next retry
// attempt
// reslice toDel in preparation for next retry attempt
toDel = toDel[:0]
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID,
err, attempts))
failed++
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
@@ -514,7 +534,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}

@@ -534,40 +554,58 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
return err
}

ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cancelCause := context.WithCancelCause(ctx)
defer cancelCause(nil)

results := make(chan itemOrErr[ObjectInfo], workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
}); err != nil {
// Do not need to retry if we can't list objects on source.
return err
}
go func() {
prefixes := r.Prefix.F()
if len(prefixes) == 0 {
prefixes = []string{""}
}
for _, prefix := range prefixes {
prefixResultCh := make(chan itemOrErr[ObjectInfo], workerSize)
err := api.Walk(ctx, r.Bucket, prefix, prefixResultCh, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
})
if err != nil {
cancelCause(err)
xioutil.SafeClose(results)
return
}
for result := range prefixResultCh {
results <- result
}
}
xioutil.SafeClose(results)
}()
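With multi-prefix jobs, Start now launches one Walk per prefix and funnels everything into a single results channel; a listing error cancels the whole job via context.WithCancelCause so the reason is retrievable later with context.Cause. A minimal sketch of that fan-in-with-cause pattern (the `walk` stub below is invented for the demo):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func walk(ctx context.Context, prefix string, out chan<- string) error {
	if prefix == "bad/" {
		return errors.New("listing failed for " + prefix)
	}
	out <- prefix + "obj1"
	return nil
}

func main() {
	ctx, cancelCause := context.WithCancelCause(context.Background())
	defer cancelCause(nil)

	results := make(chan string, 4)
	go func() {
		defer close(results)
		for _, prefix := range []string{"a/", "bad/", "c/"} {
			if err := walk(ctx, prefix, results); err != nil {
				cancelCause(err) // record why the job died
				return
			}
		}
	}()

	for r := range results {
		fmt.Println("got", r)
	}
	if err := context.Cause(ctx); err != nil {
		fmt.Println("aborted:", err) // aborted: listing failed for bad/
	}
}
```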

// Goroutine to periodically save batch-expire job's in-memory state
saverQuitCh := make(chan struct{})
go func() {
saveTicker := time.NewTicker(10 * time.Second)
defer saveTicker.Stop()
for {
quit := false
after := time.Minute
for !quit {
select {
case <-saveTicker.C:
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))

case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancellation.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return

quit = true
case <-saverQuitCh:
// persist in-memory state immediately to disk.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
quit = true
}

if quit {
// save immediately if we are quitting
after = 0
}

ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context
batchLogIf(ctx, ri.updateAfter(ctx, api, after, job))
cancel()
}
}()
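The saver goroutine was reworked so both shutdown paths share one exit: a `quit` flag flips on ctx.Done or the quit channel, the final write passes a zero defer window to force an immediate flush, and every save runs under an independent timeout context so a canceled job context cannot block the last write. A sketch of the pattern (timings shrunk for the demo; names are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func saver(ctx context.Context, save func(ctx context.Context, after time.Duration), quitCh <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	quit := false
	for !quit {
		after := 50 * time.Millisecond
		select {
		case <-ticker.C:
		case <-ctx.Done():
			quit = true
		case <-quitCh:
			quit = true
		}
		if quit {
			after = 0 // final save flushes immediately
		}
		// Independent context: the save still completes after cancellation.
		sctx, cancel := context.WithTimeout(context.Background(), time.Second)
		save(sctx, after)
		cancel()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		saver(ctx, func(_ context.Context, after time.Duration) {
			fmt.Println("save, defer window:", after)
		}, nil)
		close(done)
	}()
	time.Sleep(25 * time.Millisecond)
	cancel()
	<-done
}
```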
|
||||
|
||||
@@ -584,7 +622,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
||||
versionsCount int
|
||||
toDel []expireObjInfo
|
||||
)
|
||||
failed := true
|
||||
failed := false
|
||||
for result := range results {
|
||||
if result.Err != nil {
|
||||
failed = true
|
||||
@@ -653,6 +691,10 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
||||
ObjectInfo: result.Item,
|
||||
})
|
||||
}
|
||||
if context.Cause(ctx) != nil {
|
||||
xioutil.SafeClose(expireCh)
|
||||
return context.Cause(ctx)
|
||||
}
|
||||
// Send any remaining objects downstream
|
||||
if len(toDel) > 0 {
|
||||
select {
|
||||
|
||||
@@ -39,7 +39,7 @@ func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
return
|
||||
}
|
||||
case "Prefix":
|
||||
z.Prefix, err = dc.ReadString()
|
||||
err = z.Prefix.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
@@ -114,7 +114,7 @@ func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Prefix)
|
||||
err = z.Prefix.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
@@ -171,7 +171,11 @@ func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.AppendString(o, z.Bucket)
|
||||
// string "Prefix"
|
||||
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
|
||||
o = msgp.AppendString(o, z.Prefix)
|
||||
o, err = z.Prefix.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
// string "NotificationCfg"
|
||||
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
|
||||
o, err = z.NotificationCfg.MarshalMsg(o)
|
||||
@@ -230,7 +234,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
return
|
||||
}
|
||||
case "Prefix":
|
||||
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
|
||||
bts, err = z.Prefix.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
@@ -280,7 +284,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobExpire) Msgsize() (s int) {
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
|
||||
for za0001 := range z.Rules {
|
||||
s += z.Rules[za0001].Msgsize()
|
||||
}
|
||||
@@ -306,7 +310,7 @@ func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "OlderThan":
|
||||
z.OlderThan, err = dc.ReadDuration()
|
||||
err = z.OlderThan.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
@@ -433,7 +437,7 @@ func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteDuration(z.OlderThan)
|
||||
err = z.OlderThan.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
@@ -544,7 +548,11 @@ func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
// map header, size 8
|
||||
// string "OlderThan"
|
||||
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
o = msgp.AppendDuration(o, z.OlderThan)
|
||||
o, err = z.OlderThan.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
}
|
||||
// string "CreatedBefore"
|
||||
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
|
||||
if z.CreatedBefore == nil {
|
||||
@@ -613,7 +621,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "OlderThan":
|
||||
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
|
||||
bts, err = z.OlderThan.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
@@ -734,7 +742,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobExpireFilter) Msgsize() (s int) {
|
||||
s = 1 + 10 + msgp.DurationSize + 14
|
||||
s = 1 + 10 + z.OlderThan.Msgsize() + 14
|
||||
if z.CreatedBefore == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
|
||||
@@ -18,9 +18,10 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestParseBatchJobExpire(t *testing.T) {
|
||||
@@ -32,7 +33,7 @@ expire: # Expire objects that match a condition
|
||||
rules:
|
||||
- type: object # regular objects with zero or more older versions
|
||||
name: NAME # match object names that satisfy the wildcard expression.
|
||||
olderThan: 70h # match objects older than this value
|
||||
olderThan: 7d10h # match objects older than this value
|
||||
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
tags:
|
||||
- key: name
|
||||
@@ -64,8 +65,61 @@ expire: # Expire objects that match a condition
|
||||
delay: 500ms # least amount of delay between each retry
|
||||
`
|
||||
var job BatchJobRequest
|
||||
err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
|
||||
err := yaml.Unmarshal([]byte(expireYaml), &job)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to parse batch-job-expire yaml", err)
|
||||
}
|
||||
if !slices.Equal(job.Expire.Prefix.F(), []string{"myprefix"}) {
|
||||
t.Fatal("Failed to parse batch-job-expire yaml")
|
||||
}
|
||||
|
||||
multiPrefixExpireYaml := `
|
||||
expire: # Expire objects that match a condition
|
||||
apiVersion: v1
|
||||
bucket: mybucket # Bucket where this batch job will expire matching objects from
|
||||
prefix: # (Optional) Prefix under which this job will expire objects matching the rules below.
|
||||
- myprefix
|
||||
- myprefix1
|
||||
rules:
|
||||
- type: object # regular objects with zero or more older versions
|
||||
name: NAME # match object names that satisfy the wildcard expression.
|
||||
olderThan: 7d10h # match objects older than this value
|
||||
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
tags:
|
||||
- key: name
|
||||
value: pick* # match objects with tag 'name', all values starting with 'pick'
|
||||
metadata:
|
||||
- key: content-type
|
||||
value: image/* # match objects with 'content-type', all values starting with 'image/'
|
||||
size:
|
||||
lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
|
||||
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
|
||||
purge:
|
||||
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
|
||||
# retainVersions: 5 # keep the latest 5 versions of the object.
|
||||
|
||||
- type: deleted # objects with delete marker as their latest version
|
||||
name: NAME # match object names that satisfy the wildcard expression.
|
||||
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
|
||||
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
purge:
|
||||
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
|
||||
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
|
||||
|
||||
notify:
|
||||
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
|
||||
token: Bearer xxxxx # optional authentication token for the notification endpoint
|
||||
|
||||
retry:
|
||||
attempts: 10 # number of retries for the job before giving up
|
||||
delay: 500ms # least amount of delay between each retry
|
||||
`
|
||||
var multiPrefixJob BatchJobRequest
|
||||
err = yaml.Unmarshal([]byte(multiPrefixExpireYaml), &multiPrefixJob)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to parse batch-job-expire yaml", err)
|
||||
}
|
||||
if !slices.Equal(multiPrefixJob.Expire.Prefix.F(), []string{"myprefix", "myprefix1"}) {
|
||||
t.Fatal("Failed to parse batch-job-expire yaml")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -48,15 +49,20 @@ import (
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
xioutil "github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/pkg/v2/console"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"github.com/minio/pkg/v3/console"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
"github.com/minio/pkg/v3/workers"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
var globalBatchConfig batch.Config
|
||||
|
||||
const (
|
||||
// Keep the completed/failed job stats 3 days before removing it
|
||||
oldJobsExpiration = 3 * 24 * time.Hour
|
||||
)
|
||||
|
||||
// BatchJobRequest this is an internal data structure not for external consumption.
|
||||
type BatchJobRequest struct {
|
||||
ID string `yaml:"-" json:"name"`
@@ -262,7 +268,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
        JobType:   string(job.Type()),
        StartTime: job.Started,
    }
    if err := ri.load(ctx, api, job); err != nil {
    if err := ri.loadOrInit(ctx, api, job); err != nil {
        return err
    }
    if ri.Complete {
@@ -270,8 +276,12 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
    }
    globalBatchJobsMetrics.save(job.ID, ri)

    retryAttempts := job.Replicate.Flags.Retry.Attempts
    if retryAttempts <= 0 {
        retryAttempts = batchReplJobDefaultRetries
    }
    delay := job.Replicate.Flags.Retry.Delay
    if delay == 0 {
    if delay <= 0 {
        delay = batchReplJobDefaultRetryDelay
    }
    rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
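The rnd source above feeds jitter between retries. A small standalone sketch of that pattern (the exact sleep computation is illustrative, not lifted from this diff):

package main

import (
    "errors"
    "math/rand"
    "time"
)

func main() {
    const (
        retryAttempts = 3
        delay         = time.Second // matches batchReplJobDefaultRetryDelay in this diff
    )
    rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

    doWork := func() error { return errors.New("transient failure") } // stand-in
    for attempts := 1; attempts <= retryAttempts; attempts++ {
        if err := doWork(); err == nil {
            break
        }
        // delay plus up to one extra delay of random jitter, so
        // concurrent jobs do not retry in lockstep.
        time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
    }
}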
@@ -281,22 +291,22 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
    isStorageClassOnly := len(r.Flags.Filter.Metadata) == 1 && strings.EqualFold(r.Flags.Filter.Metadata[0].Key, xhttp.AmzStorageClass)

    skip := func(oi ObjectInfo) (ok bool) {
        if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan {
        if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan.D() {
            // skip all objects that are newer than specified older duration
            return true
        }

        if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan {
        if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan.D() {
            // skip all objects that are older than specified newer duration
            return true
        }

        if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(oi.ModTime) {
        if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.After(oi.ModTime) {
            // skip all objects that are created before the specified time.
            return true
        }

        if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(oi.ModTime) {
        if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.Before(oi.ModTime) {
            // skip all objects that are created after the specified time.
            return true
        }
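Note that this hunk inverts the two time comparisons: the old code's Before/After calls contradicted the comments next to them. A small standalone check of the corrected semantics (illustrative only):

package main

import (
    "fmt"
    "time"
)

func main() {
    createdAfter := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
    modTime := time.Date(2023, 6, 1, 0, 0, 0, 0, time.UTC)

    // Corrected rule: an object is skipped when it was created before
    // the createdAfter cutoff, i.e. cutoff.After(modTime).
    skip := createdAfter.After(modTime)
    fmt.Println(skip) // true: 2023-06-01 predates the 2024-01-01 cutoff
}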
@@ -371,7 +381,6 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
        return err
    }

    retryAttempts := ri.RetryAttempts
    retry := false
    for attempts := 1; attempts <= retryAttempts; attempts++ {
        attempts := attempts
@@ -379,12 +388,27 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
        s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
        minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO
        ctx, cancel := context.WithCancel(ctx)
        objInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
            Prefix:       r.Source.Prefix,
            WithVersions: minioSrc,
            Recursive:    true,
            WithMetadata: true,
        })

        objInfoCh := make(chan miniogo.ObjectInfo, 1)
        go func() {
            prefixes := r.Source.Prefix.F()
            if len(prefixes) == 0 {
                prefixes = []string{""}
            }
            for _, prefix := range prefixes {
                prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
                    Prefix:       prefix,
                    WithVersions: minioSrc,
                    Recursive:    true,
                    WithMetadata: true,
                })
                for obj := range prefixObjInfoCh {
                    objInfoCh <- obj
                }
            }
            xioutil.SafeClose(objInfoCh)
        }()
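The goroutine above serializes several per-prefix listings into one channel; listing prefixes sequentially rather than concurrently keeps results grouped per prefix. A stripped-down, generic version of that fan-in shape (illustrative, not MinIO code):

package main

import "fmt"

// merge drains each input channel, in order, into a single output channel.
func merge[T any](inputs ...<-chan T) <-chan T {
    out := make(chan T, 1)
    go func() {
        defer close(out)
        for _, in := range inputs {
            for v := range in {
                out <- v
            }
        }
    }()
    return out
}

func main() {
    a := make(chan string, 2)
    b := make(chan string, 2)
    a <- "prefix1/x"
    a <- "prefix1/y"
    close(a)
    b <- "prefix2/z"
    close(b)
    for v := range merge(a, b) {
        fmt.Println(v)
    }
}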

    prevObj := ""
    skipReplicate := false

@@ -534,7 +558,7 @@ func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo
    return oi
}

func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo) error {
func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo, prefix string) error {
    input := make(chan minio.SnowballObject, 1)
    opts := minio.SnowballOptions{
        Opts: minio.PutObjectOptions{},
@@ -556,6 +580,10 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
            continue
        }

        if prefix != "" {
            entry.Name = pathJoin(prefix, entry.Name)
        }

        snowballObj := minio.SnowballObject{
            // Create path to store objects within the bucket.
            Key: entry.Name,
@@ -719,63 +747,73 @@ const (

    batchReplJobAPIVersion        = "v1"
    batchReplJobDefaultRetries    = 3
    batchReplJobDefaultRetryDelay = 250 * time.Millisecond
    batchReplJobDefaultRetryDelay = time.Second
)

func getJobReportPath(job BatchJobRequest) string {
    var fileName string
    switch {
    case job.Replicate != nil:
        fileName = batchReplName
    case job.KeyRotate != nil:
        fileName = batchKeyRotationName
    case job.Expire != nil:
        fileName = batchExpireName
    }
    return pathJoin(batchJobReportsPrefix, job.ID, fileName)
}

func getJobPath(job BatchJobRequest) string {
    return pathJoin(batchJobPrefix, job.ID)
}

func (ri *batchJobInfo) getJobReportPath() (string, error) {
    var fileName string
    switch madmin.BatchJobType(ri.JobType) {
    case madmin.BatchJobReplicate:
        fileName = batchReplName
    case madmin.BatchJobKeyRotate:
        fileName = batchKeyRotationName
    case madmin.BatchJobExpire:
        fileName = batchExpireName
    default:
        return "", fmt.Errorf("unknown job type: %v", ri.JobType)
    }
    return pathJoin(batchJobReportsPrefix, ri.JobID, fileName), nil
}

func (ri *batchJobInfo) loadOrInit(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
    err := ri.load(ctx, api, job)
    if errors.Is(err, errNoSuchJob) {
        switch {
        case job.Replicate != nil:
            ri.Version = batchReplVersionV1
        case job.KeyRotate != nil:
            ri.Version = batchKeyRotateVersionV1
        case job.Expire != nil:
            ri.Version = batchExpireVersionV1
        }
        return nil
    }
    return err
}

func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
    path, err := job.getJobReportPath()
    if err != nil {
        batchLogIf(ctx, err)
        return err
    }
    return ri.loadByPath(ctx, api, path)
}

func (ri *batchJobInfo) loadByPath(ctx context.Context, api ObjectLayer, path string) error {
    var format, version uint16
    switch {
    case job.Replicate != nil:
    switch filepath.Base(path) {
    case batchReplName:
        version = batchReplVersionV1
        format = batchReplFormat
    case job.KeyRotate != nil:
    case batchKeyRotationName:
        version = batchKeyRotateVersionV1
        format = batchKeyRotationFormat
    case job.Expire != nil:
    case batchExpireName:
        version = batchExpireVersionV1
        format = batchExpireFormat
    default:
        return errors.New("no supported batch job request specified")
    }
    data, err := readConfig(ctx, api, getJobReportPath(job))

    data, err := readConfig(ctx, api, path)
    if err != nil {
        if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
            ri.Version = int(version)
            switch {
            case job.Replicate != nil:
                ri.RetryAttempts = batchReplJobDefaultRetries
                if job.Replicate.Flags.Retry.Attempts > 0 {
                    ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts
                }
            case job.KeyRotate != nil:
                ri.RetryAttempts = batchKeyRotateJobDefaultRetries
                if job.KeyRotate.Flags.Retry.Attempts > 0 {
                    ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts
                }
            case job.Expire != nil:
                ri.RetryAttempts = batchExpireJobDefaultRetries
                if job.Expire.Retry.Attempts > 0 {
                    ri.RetryAttempts = job.Expire.Retry.Attempts
                }
            }
            return nil
            return errNoSuchJob
        }
        return err
    }
@@ -919,7 +957,12 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati
        if err != nil {
            return err
        }
        return saveConfig(ctx, api, getJobReportPath(job), buf)
        path, err := ri.getJobReportPath()
        if err != nil {
            batchLogIf(ctx, err)
            return err
        }
        return saveConfig(ctx, api, path, buf)
    }
    ri.mu.Unlock()
    return nil
@@ -944,8 +987,10 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,
    ri.mu.Lock()
    defer ri.mu.Unlock()

    ri.Bucket = bucket
    ri.Object = info.Name
    if success {
        ri.Bucket = bucket
        ri.Object = info.Name
    }
    ri.countItem(info.Size, info.DeleteMarker, success, attempt)
}

@@ -971,7 +1016,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
        JobType:   string(job.Type()),
        StartTime: job.Started,
    }
    if err := ri.load(ctx, api, job); err != nil {
    if err := ri.loadOrInit(ctx, api, job); err != nil {
        return err
    }
    if ri.Complete {
@@ -980,29 +1025,34 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
    globalBatchJobsMetrics.save(job.ID, ri)
    lastObject := ri.Object

    retryAttempts := job.Replicate.Flags.Retry.Attempts
    if retryAttempts <= 0 {
        retryAttempts = batchReplJobDefaultRetries
    }
    delay := job.Replicate.Flags.Retry.Delay
    if delay == 0 {
    if delay <= 0 {
        delay = batchReplJobDefaultRetryDelay
    }

    rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

    selectObj := func(info FileInfo) (ok bool) {
        if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
        if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan.D() {
            // skip all objects that are newer than specified older duration
            return false
        }

        if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
        if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan.D() {
            // skip all objects that are older than specified newer duration
            return false
        }

        if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
        if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.After(info.ModTime) {
            // skip all objects that are created before the specified time.
            return false
        }

        if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
        if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.Before(info.ModTime) {
            // skip all objects that are created after the specified time.
            return false
        }
@@ -1071,99 +1121,109 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba

    c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)

    var (
        walkCh = make(chan itemOrErr[ObjectInfo], 100)
        slowCh = make(chan itemOrErr[ObjectInfo], 100)
    )

    if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
        go func() {
            // Snowball currently needs the high level minio-go Client, not the Core one
            cl, err := miniogo.New(u.Host, &miniogo.Options{
                Creds:        credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
                Secure:       u.Scheme == "https",
                Transport:    getRemoteInstanceTransport(),
                BucketLookup: lookupStyle(r.Target.Path),
            })
            if err != nil {
                batchLogIf(ctx, err)
                return
            }

            // Already validated before arriving here
            smallerThan, _ := humanize.ParseBytes(*r.Source.Snowball.SmallerThan)

            batch := make([]ObjectInfo, 0, *r.Source.Snowball.Batch)
            writeFn := func(batch []ObjectInfo) {
                if len(batch) > 0 {
                    if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
                        batchLogIf(ctx, err)
                        for _, b := range batch {
                            slowCh <- itemOrErr[ObjectInfo]{Item: b}
                        }
                    } else {
                        ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
                        globalBatchJobsMetrics.save(job.ID, ri)
                        // persist in-memory state to disk after every 10secs.
                        batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
                    }
                }
            }
            for obj := range walkCh {
                if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) {
                    slowCh <- obj
                    continue
                }

                batch = append(batch, obj.Item)

                if len(batch) < *r.Source.Snowball.Batch {
                    continue
                }
                writeFn(batch)
                batch = batch[:0]
            }
            writeFn(batch)
            xioutil.SafeClose(slowCh)
        }()
    } else {
        slowCh = walkCh
    }

    workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
    if err != nil {
        return err
    }

    wk, err := workers.New(workerSize)
    if err != nil {
        // invalid worker size.
        return err
    }

    walkQuorum := env.Get("_MINIO_BATCH_REPLICATION_WALK_QUORUM", "strict")
    if walkQuorum == "" {
        walkQuorum = "strict"
    }

    retryAttempts := ri.RetryAttempts
    retry := false
    for attempts := 1; attempts <= retryAttempts; attempts++ {
        attempts := attempts
        var (
            walkCh = make(chan itemOrErr[ObjectInfo], 100)
            slowCh = make(chan itemOrErr[ObjectInfo], 100)
        )

        ctx, cancel := context.WithCancel(ctx)
        if r.Source.Snowball.Disable != nil && !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
            go func() {
                // Snowball currently needs the high level minio-go Client, not the Core one
                cl, err := miniogo.New(u.Host, &miniogo.Options{
                    Creds:        credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
                    Secure:       u.Scheme == "https",
                    Transport:    getRemoteInstanceTransport(),
                    BucketLookup: lookupStyle(r.Target.Path),
                })
                if err != nil {
                    batchLogOnceIf(ctx, err, job.ID+"miniogo.New")
                    return
                }

                // Already validated before arriving here
                smallerThan, _ := humanize.ParseBytes(*r.Source.Snowball.SmallerThan)

                batch := make([]ObjectInfo, 0, *r.Source.Snowball.Batch)
                writeFn := func(batch []ObjectInfo) {
                    if len(batch) > 0 {
                        if err := r.writeAsArchive(ctx, api, cl, batch, r.Target.Prefix); err != nil {
                            batchLogOnceIf(ctx, err, job.ID+"writeAsArchive")
                            for _, b := range batch {
                                slowCh <- itemOrErr[ObjectInfo]{Item: b}
                            }
                        } else {
                            ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
                            globalBatchJobsMetrics.save(job.ID, ri)
                            // persist in-memory state to disk after every 10secs.
                            batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job), job.ID+"updateAfter")
                        }
                    }
                }
                for obj := range walkCh {
                    if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) {
                        slowCh <- obj
                        continue
                    }

                    batch = append(batch, obj.Item)

                    if len(batch) < *r.Source.Snowball.Batch {
                        continue
                    }
                    writeFn(batch)
                    batch = batch[:0]
                }
                writeFn(batch)
                xioutil.SafeClose(slowCh)
            }()
        } else {
            slowCh = walkCh
        }

        workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
        if err != nil {
            return err
        }

        wk, err := workers.New(workerSize)
        if err != nil {
            // invalid worker size.
            return err
        }

        walkQuorum := env.Get("_MINIO_BATCH_REPLICATION_WALK_QUORUM", "strict")
        if walkQuorum == "" {
            walkQuorum = "strict"
        }
        ctx, cancelCause := context.WithCancelCause(ctx)
        // one of source/target is s3, skip delete marker and all versions under the same object name.
        s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3

        if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, walkCh, WalkOptions{
            Marker:   lastObject,
            Filter:   selectObj,
            AskDisks: walkQuorum,
        }); err != nil {
            cancel()
            // Do not need to retry if we can't list objects on source.
            return err
        }
        go func() {
            prefixes := r.Source.Prefix.F()
            if len(prefixes) == 0 {
                prefixes = []string{""}
            }
            for _, prefix := range prefixes {
                prefixWalkCh := make(chan itemOrErr[ObjectInfo], 100)
                if err := api.Walk(ctx, r.Source.Bucket, prefix, prefixWalkCh, WalkOptions{
                    Marker:   lastObject,
                    Filter:   selectObj,
                    AskDisks: walkQuorum,
                }); err != nil {
                    cancelCause(err)
                    xioutil.SafeClose(walkCh)
                    return
                }
                for obj := range prefixWalkCh {
                    walkCh <- obj
                }
            }
            xioutil.SafeClose(walkCh)
        }()

        prevObj := ""

@@ -1171,7 +1231,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
        for res := range slowCh {
            if res.Err != nil {
                ri.Failed = true
                batchLogIf(ctx, res.Err)
                batchLogOnceIf(ctx, res.Err, job.ID+"res.Err")
                continue
            }
            result := res.Item
@@ -1198,7 +1258,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                    return
                }
                stopFn(result, err)
                batchLogIf(ctx, err)
                batchLogOnceIf(ctx, err, job.ID+"ReplicateToTarget")
                success = false
            } else {
                stopFn(result, nil)
@@ -1206,7 +1266,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts)
                globalBatchJobsMetrics.save(job.ID, ri)
                // persist in-memory state to disk after every 10secs.
                batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
                batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job), job.ID+"updateAfter2")

                if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
                    time.Sleep(wait)
@@ -1214,20 +1274,23 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
            }()
        }
        wk.Wait()

        // Do not need to retry if we can't list objects on source.
        if context.Cause(ctx) != nil {
            return context.Cause(ctx)
        }
        ri.RetryAttempts = attempts
        ri.Complete = ri.ObjectsFailed == 0
        ri.Failed = ri.ObjectsFailed > 0

        globalBatchJobsMetrics.save(job.ID, ri)
        // persist in-memory state to disk.
        batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
        batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 0, job), job.ID+"updateAfter3")

        if err := r.Notify(ctx, ri); err != nil {
            batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
            batchLogOnceIf(ctx, fmt.Errorf("unable to notify %v", err), job.ID+"notify")
        }

        cancel()
        cancelCause(nil)
        if ri.Failed {
            ri.ObjectsFailed = 0
            ri.Bucket = ""
@@ -1436,10 +1499,24 @@ func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {
}

func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) {
    deleteConfig(ctx, api, getJobReportPath(j))
    deleteConfig(ctx, api, getJobPath(j))
}

func (j BatchJobRequest) getJobReportPath() (string, error) {
    var fileName string
    switch {
    case j.Replicate != nil:
        fileName = batchReplName
    case j.KeyRotate != nil:
        fileName = batchKeyRotationName
    case j.Expire != nil:
        fileName = batchExpireName
    default:
        return "", errors.New("unknown job type")
    }
    return pathJoin(batchJobReportsPrefix, j.ID, fileName), nil
}

func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error {
    if j.Replicate == nil && j.KeyRotate == nil && j.Expire == nil {
        return errInvalidArgument
@@ -1476,7 +1553,7 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string

func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
    // TODO: support custom storage class for remote replication
    putOpts, err = putReplicationOpts(ctx, "", objInfo)
    putOpts, err = putReplicationOpts(ctx, "", objInfo, 0)
    if err != nil {
        return putOpts, err
    }
@@ -1500,9 +1577,6 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
    }

    jobType := r.Form.Get("jobType")
    if jobType == "" {
        jobType = string(madmin.BatchJobReplicate)
    }

    resultCh := make(chan itemOrErr[ObjectInfo])

@@ -1520,6 +1594,9 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
            return
        }
        if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) {
            continue
        }
        req := &BatchJobRequest{}
        if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
            if !errors.Is(err, errNoSuchJob) {
@@ -1528,7 +1605,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
            continue
        }

        if jobType == string(req.Type()) {
        if jobType == string(req.Type()) || jobType == "" {
            listResult.Jobs = append(listResult.Jobs, madmin.BatchJobResult{
                ID:   req.ID,
                Type: req.Type(),
@@ -1542,6 +1619,55 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
    batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult))
}

// BatchJobStatus - returns the status of a batch job saved in the disk
func (a adminAPIHandlers) BatchJobStatus(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListBatchJobsAction)
    if objectAPI == nil {
        return
    }

    jobID := r.Form.Get("jobId")
    if jobID == "" {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL)
        return
    }

    req := BatchJobRequest{ID: jobID}
    if i := strings.Index(jobID, "-"); i > 0 {
        switch madmin.BatchJobType(jobID[:i]) {
        case madmin.BatchJobReplicate:
            req.Replicate = &BatchJobReplicateV1{}
        case madmin.BatchJobKeyRotate:
            req.KeyRotate = &BatchJobKeyRotateV1{}
        case madmin.BatchJobExpire:
            req.Expire = &BatchJobExpire{}
        default:
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, errors.New("job ID format unrecognized")), r.URL)
            return
        }
    }

    ri := &batchJobInfo{}
    if err := ri.load(ctx, objectAPI, req); err != nil {
        if !errors.Is(err, errNoSuchJob) {
            batchLogIf(ctx, err)
        }
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    buf, err := json.Marshal(madmin.BatchJobStatus{LastMetric: ri.metric()})
    if err != nil {
        batchLogIf(ctx, err)
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    w.Write(buf)
}
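This handler relies on the new job ID layout introduced further down in this diff ("<type>-<shortuuid><separator><index>"): the segment before the first "-" names the job type. A standalone illustration of that parsing step (plain strings stand in for the madmin.BatchJobType constants, whose exact values are an assumption here):

package main

import (
    "fmt"
    "strings"
)

func jobTypeFromID(jobID string) (string, bool) {
    i := strings.Index(jobID, "-")
    if i <= 0 {
        return "", false // legacy IDs have no type prefix
    }
    switch t := jobID[:i]; t {
    case "replicate", "keyrotate", "expire":
        return t, true
    default:
        return "", false
    }
}

func main() {
    fmt.Println(jobTypeFromID("replicate-Kb3tYrViSquGKDvDbQbDZA:0")) // replicate true
    fmt.Println(jobTypeFromID("badid"))                              //  false
}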

var errNoSuchJob = errors.New("no such job")

// DescribeBatchJob returns the currently active batch job definition
@@ -1579,7 +1705,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
    w.Write(buf)
}

// StarBatchJob queue a new job for execution
// StartBatchJob queue a new job for execution
func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

@@ -1633,7 +1759,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
        return
    }

    job.ID = fmt.Sprintf("%s:%d", shortuuid.New(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
    job.ID = fmt.Sprintf("%s-%s%s%d", job.Type(), shortuuid.New(), getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
    job.User = user
    job.Started = time.Now()

@@ -1721,11 +1847,60 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP
        jobCancelers: make(map[string]context.CancelFunc),
    }
    jpool.ResizeWorkers(workers)
    jpool.resume()

    randomWait := func() time.Duration {
        // randomWait depends on the number of nodes to avoid triggering resume and cleanups at the same time.
        return time.Duration(rand.Float64() * float64(time.Duration(globalEndpoints.NEndpoints())*time.Hour))
    }

    go func() {
        jpool.resume(randomWait)
        jpool.cleanupReports(randomWait)
    }()

    return jpool
}

func (j *BatchJobPool) resume() {
func (j *BatchJobPool) cleanupReports(randomWait func() time.Duration) {
    t := time.NewTimer(randomWait())
    defer t.Stop()

    for {
        select {
        case <-GlobalContext.Done():
            return
        case <-t.C:
            results := make(chan itemOrErr[ObjectInfo], 100)
            ctx, cancel := context.WithCancel(j.ctx)
            defer cancel()
            if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobReportsPrefix, results, WalkOptions{}); err != nil {
                batchLogIf(j.ctx, err)
                t.Reset(randomWait())
                continue
            }
            for result := range results {
                if result.Err != nil {
                    batchLogIf(j.ctx, result.Err)
                    continue
                }
                ri := &batchJobInfo{}
                if err := ri.loadByPath(ctx, j.objLayer, result.Item.Name); err != nil {
                    batchLogIf(ctx, err)
                    continue
                }
                if (ri.Complete || ri.Failed) && time.Since(ri.LastUpdate) > oldJobsExpiration {
                    deleteConfig(ctx, j.objLayer, result.Item.Name)
                }
            }

            t.Reset(randomWait())
        }
    }
}
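cleanupReports re-arms its timer with a fresh random period after each pass, so that nodes in a cluster do not all sweep old job reports at the same moment. The shape of that jittered loop, reduced to its essentials (illustrative, not MinIO code; milliseconds used so the demo finishes quickly):

package main

import (
    "context"
    "fmt"
    "math/rand"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
    defer cancel()

    // Jitter scaled by cluster size (here: 4 hypothetical nodes).
    randomWait := func() time.Duration {
        return time.Duration(rand.Float64() * float64(4*100*time.Millisecond))
    }

    t := time.NewTimer(randomWait())
    defer t.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            fmt.Println("cleanup pass at", time.Now())
            t.Reset(randomWait()) // re-arm with fresh jitter
        }
    }
}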

func (j *BatchJobPool) resume(randomWait func() time.Duration) {
    time.Sleep(randomWait())

    results := make(chan itemOrErr[ObjectInfo], 100)
    ctx, cancel := context.WithCancel(j.ctx)
    defer cancel()
@@ -1738,6 +1913,9 @@ func (j *BatchJobPool) resume() {
            batchLogIf(j.ctx, result.Err)
            continue
        }
        if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) {
            continue
        }
        // ignore batch-replicate.bin and batch-rotate.bin entries
        if strings.HasSuffix(result.Item.Name, slashSeparator) {
            continue
@@ -1808,7 +1986,6 @@ func (j *BatchJobPool) AddWorker() {
                }
            }
        }
        job.delete(j.ctx, j.objLayer)
        j.canceler(job.ID, false)
    case <-j.workerKillCh:
        return
@@ -1869,7 +2046,9 @@ func (j *BatchJobPool) canceler(jobID string, cancel bool) error {
            canceler()
        }
    }
    delete(j.jobCancelers, jobID)
    if cancel {
        delete(j.jobCancelers, jobID)
    }
    return nil
}

@@ -1988,18 +2167,49 @@ func (m *batchJobMetrics) purgeJobMetrics() {
            var toDeleteJobMetrics []string
            m.RLock()
            for id, metrics := range m.metrics {
                if time.Since(metrics.LastUpdate) > 24*time.Hour && (metrics.Complete || metrics.Failed) {
                if time.Since(metrics.LastUpdate) > oldJobsExpiration && (metrics.Complete || metrics.Failed) {
                    toDeleteJobMetrics = append(toDeleteJobMetrics, id)
                }
            }
            m.RUnlock()
            for _, jobID := range toDeleteJobMetrics {
                m.delete(jobID)
                j := BatchJobRequest{
                    ID: jobID,
                }
                j.delete(GlobalContext, newObjectLayerFn())
            }
        }
    }
}

// load metrics from disk on startup
func (m *batchJobMetrics) init(ctx context.Context, objectAPI ObjectLayer) error {
    resultCh := make(chan itemOrErr[ObjectInfo])

    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobReportsPrefix, resultCh, WalkOptions{}); err != nil {
        return err
    }

    for result := range resultCh {
        if result.Err != nil {
            return result.Err
        }
        ri := &batchJobInfo{}
        if err := ri.loadByPath(ctx, objectAPI, result.Item.Name); err != nil {
            if !errors.Is(err, errNoSuchJob) {
                batchLogIf(ctx, err)
            }
            continue
        }
        m.metrics[ri.JobID] = ri
    }
    return nil
}

func (m *batchJobMetrics) delete(jobID string) {
    m.Lock()
    defer m.Unlock()
@@ -2077,3 +2287,30 @@ func lookupStyle(s string) miniogo.BucketLookupType {
    }
    return lookup
}

// BatchJobPrefix - to support prefix field yaml unmarshalling with string or slice of strings
type BatchJobPrefix []string

var _ yaml.Unmarshaler = &BatchJobPrefix{}

// UnmarshalYAML - to support prefix field yaml unmarshalling with string or slice of strings
func (b *BatchJobPrefix) UnmarshalYAML(value *yaml.Node) error {
    // try slice first
    tmpSlice := []string{}
    if err := value.Decode(&tmpSlice); err == nil {
        *b = tmpSlice
        return nil
    }
    // try string
    tmpStr := ""
    if err := value.Decode(&tmpStr); err == nil {
        *b = []string{tmpStr}
        return nil
    }
    return fmt.Errorf("unable to decode %s", value.Value)
}

// F - return prefix(es) as slice
func (b *BatchJobPrefix) F() []string {
    return *b
}
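A quick standalone demonstration of the string-or-slice behavior this unmarshaller enables (assuming gopkg.in/yaml.v3; the type is reproduced inline under a different name so the snippet runs on its own):

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

type prefixes []string

func (b *prefixes) UnmarshalYAML(value *yaml.Node) error {
    var s []string
    if err := value.Decode(&s); err == nil { // sequence form
        *b = s
        return nil
    }
    var one string
    if err := value.Decode(&one); err == nil { // scalar form
        *b = []string{one}
        return nil
    }
    return fmt.Errorf("unable to decode %s", value.Value)
}

func main() {
    var a, b struct {
        Prefix prefixes `yaml:"prefix"`
    }
    yaml.Unmarshal([]byte(`prefix: foo`), &a)
    yaml.Unmarshal([]byte("prefix:\n  - foo\n  - bar"), &b)
    fmt.Println(a.Prefix, b.Prefix) // [foo] [foo bar]
}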

@@ -6,6 +6,89 @@ import (
    "github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *BatchJobPrefix) DecodeMsg(dc *msgp.Reader) (err error) {
    var zb0002 uint32
    zb0002, err = dc.ReadArrayHeader()
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    if cap((*z)) >= int(zb0002) {
        (*z) = (*z)[:zb0002]
    } else {
        (*z) = make(BatchJobPrefix, zb0002)
    }
    for zb0001 := range *z {
        (*z)[zb0001], err = dc.ReadString()
        if err != nil {
            err = msgp.WrapError(err, zb0001)
            return
        }
    }
    return
}

// EncodeMsg implements msgp.Encodable
func (z BatchJobPrefix) EncodeMsg(en *msgp.Writer) (err error) {
    err = en.WriteArrayHeader(uint32(len(z)))
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0003 := range z {
        err = en.WriteString(z[zb0003])
        if err != nil {
            err = msgp.WrapError(err, zb0003)
            return
        }
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z BatchJobPrefix) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    o = msgp.AppendArrayHeader(o, uint32(len(z)))
    for zb0003 := range z {
        o = msgp.AppendString(o, z[zb0003])
    }
    return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobPrefix) UnmarshalMsg(bts []byte) (o []byte, err error) {
    var zb0002 uint32
    zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    if cap((*z)) >= int(zb0002) {
        (*z) = (*z)[:zb0002]
    } else {
        (*z) = make(BatchJobPrefix, zb0002)
    }
    for zb0001 := range *z {
        (*z)[zb0001], bts, err = msgp.ReadStringBytes(bts)
        if err != nil {
            err = msgp.WrapError(err, zb0001)
            return
        }
    }
    o = bts
    return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobPrefix) Msgsize() (s int) {
    s = msgp.ArrayHeaderSize
    for zb0003 := range z {
        s += msgp.StringPrefixSize + len(z[zb0003])
    }
    return
}

// DecodeMsg implements msgp.Decodable
func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
    var field []byte

@@ -9,6 +9,119 @@ import (
    "github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalBatchJobPrefix(t *testing.T) {
    v := BatchJobPrefix{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgBatchJobPrefix(b *testing.B) {
    v := BatchJobPrefix{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgBatchJobPrefix(b *testing.B) {
    v := BatchJobPrefix{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshalBatchJobPrefix(b *testing.B) {
    v := BatchJobPrefix{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodeBatchJobPrefix(t *testing.T) {
    v := BatchJobPrefix{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodeBatchJobPrefix Msgsize() is inaccurate")
    }

    vn := BatchJobPrefix{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodeBatchJobPrefix(b *testing.B) {
    v := BatchJobPrefix{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodeBatchJobPrefix(b *testing.B) {
    v := BatchJobPrefix{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestMarshalUnmarshalBatchJobRequest(t *testing.T) {
    v := BatchJobRequest{}
    bts, err := v.MarshalMsg(nil)

75 cmd/batch-handlers_test.go (Normal file)
@@ -0,0 +1,75 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
    "slices"
    "testing"

    "gopkg.in/yaml.v3"
)

func TestBatchJobPrefix_UnmarshalYAML(t *testing.T) {
    type args struct {
        yamlStr string
    }
    type PrefixTemp struct {
        Prefix BatchJobPrefix `yaml:"prefix"`
    }
    tests := []struct {
        name    string
        b       PrefixTemp
        args    args
        want    []string
        wantErr bool
    }{
        {
            name: "test1",
            b:    PrefixTemp{},
            args: args{
                yamlStr: `
prefix: "foo"
`,
            },
            want:    []string{"foo"},
            wantErr: false,
        },
        {
            name: "test2",
            b:    PrefixTemp{},
            args: args{
                yamlStr: `
prefix:
  - "foo"
  - "bar"
`,
            },
            want: []string{"foo", "bar"},
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if err := yaml.Unmarshal([]byte(tt.args.yamlStr), &tt.b); (err != nil) != tt.wantErr {
                t.Errorf("UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr)
            }
            if !slices.Equal(tt.b.Prefix.F(), tt.want) {
                t.Errorf("UnmarshalYAML() = %v, want %v", tt.b.Prefix.F(), tt.want)
            }
        })
    }
}
@@ -23,7 +23,7 @@ import (
    "time"

    "github.com/dustin/go-humanize"
    "github.com/minio/pkg/v2/wildcard"
    "github.com/minio/pkg/v3/wildcard"
    "gopkg.in/yaml.v3"
)

@@ -21,8 +21,8 @@ import (
    "time"

    miniogo "github.com/minio/minio-go/v7"

    "github.com/minio/minio/internal/auth"
    "github.com/minio/pkg/v3/xtime"
)

//go:generate msgp -file $GOFILE
@@ -65,12 +65,12 @@ import (

// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
    NewerThan     time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
    OlderThan     time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
    CreatedAfter  time.Time     `yaml:"createdAfter,omitempty" json:"createdAfter"`
    CreatedBefore time.Time     `yaml:"createdBefore,omitempty" json:"createdBefore"`
    Tags          []BatchJobKV  `yaml:"tags,omitempty" json:"tags"`
    Metadata      []BatchJobKV  `yaml:"metadata,omitempty" json:"metadata"`
    NewerThan     xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
    OlderThan     xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
    CreatedAfter  time.Time      `yaml:"createdAfter,omitempty" json:"createdAfter"`
    CreatedBefore time.Time      `yaml:"createdBefore,omitempty" json:"createdBefore"`
    Tags          []BatchJobKV   `yaml:"tags,omitempty" json:"tags"`
    Metadata      []BatchJobKV   `yaml:"metadata,omitempty" json:"metadata"`
}
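Switching these fields from time.Duration to xtime.Duration is what lets the YAML filters accept day-suffixed values such as "7d" or "7d10h31s", which time.ParseDuration rejects; the .D() calls seen earlier in this diff convert back to a plain time.Duration. A minimal sketch of that round trip (assuming the github.com/minio/pkg/v3/xtime package shown in the import change above):

package main

import (
    "fmt"

    "github.com/minio/pkg/v3/xtime"
    "gopkg.in/yaml.v3"
)

func main() {
    var f struct {
        OlderThan xtime.Duration `yaml:"olderThan"`
    }
    // "7d10h31s" would not parse as a time.Duration.
    if err := yaml.Unmarshal([]byte(`olderThan: 7d10h31s`), &f); err != nil {
        panic(err)
    }
    fmt.Println(f.OlderThan.D()) // 178h0m31s as a time.Duration
}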

// BatchJobReplicateFlags various configurations for replication job definition currently includes
@@ -151,7 +151,7 @@ func (t BatchJobReplicateTarget) ValidPath() bool {
type BatchJobReplicateSource struct {
    Type     BatchJobReplicateResourceType `yaml:"type" json:"type"`
    Bucket   string                        `yaml:"bucket" json:"bucket"`
    Prefix   string                        `yaml:"prefix" json:"prefix"`
    Prefix   BatchJobPrefix                `yaml:"prefix" json:"prefix"`
    Endpoint string                        `yaml:"endpoint" json:"endpoint"`
    Path     string                        `yaml:"path" json:"path"`
    Creds    BatchJobReplicateCredentials  `yaml:"credentials" json:"credentials"`

@@ -411,7 +411,7 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
                return
            }
        case "Prefix":
            z.Prefix, err = dc.ReadString()
            err = z.Prefix.DecodeMsg(dc)
            if err != nil {
                err = msgp.WrapError(err, "Prefix")
                return
@@ -514,7 +514,7 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
    if err != nil {
        return
    }
    err = en.WriteString(z.Prefix)
    err = z.Prefix.EncodeMsg(en)
    if err != nil {
        err = msgp.WrapError(err, "Prefix")
        return
@@ -600,7 +600,11 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.AppendString(o, z.Bucket)
    // string "Prefix"
    o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
    o = msgp.AppendString(o, z.Prefix)
    o, err = z.Prefix.MarshalMsg(o)
    if err != nil {
        err = msgp.WrapError(err, "Prefix")
        return
    }
    // string "Endpoint"
    o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
    o = msgp.AppendString(o, z.Endpoint)
@@ -664,7 +668,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
                return
            }
        case "Prefix":
            z.Prefix, bts, err = msgp.ReadStringBytes(bts)
            bts, err = z.Prefix.UnmarshalMsg(bts)
            if err != nil {
                err = msgp.WrapError(err, "Prefix")
                return
@@ -742,7 +746,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobReplicateSource) Msgsize() (s int) {
    s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
    s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
    return
}

@@ -1409,13 +1413,13 @@ func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) {
        }
        switch msgp.UnsafeString(field) {
        case "NewerThan":
            z.NewerThan, err = dc.ReadDuration()
            err = z.NewerThan.DecodeMsg(dc)
            if err != nil {
                err = msgp.WrapError(err, "NewerThan")
                return
            }
        case "OlderThan":
            z.OlderThan, err = dc.ReadDuration()
            err = z.OlderThan.DecodeMsg(dc)
            if err != nil {
                err = msgp.WrapError(err, "OlderThan")
                return
@@ -1489,7 +1493,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
    if err != nil {
        return
    }
    err = en.WriteDuration(z.NewerThan)
    err = z.NewerThan.EncodeMsg(en)
    if err != nil {
        err = msgp.WrapError(err, "NewerThan")
        return
@@ -1499,7 +1503,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
    if err != nil {
        return
    }
    err = en.WriteDuration(z.OlderThan)
    err = z.OlderThan.EncodeMsg(en)
    if err != nil {
        err = msgp.WrapError(err, "OlderThan")
        return
@@ -1567,10 +1571,18 @@ func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) {
    // map header, size 6
    // string "NewerThan"
    o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
    o = msgp.AppendDuration(o, z.NewerThan)
    o, err = z.NewerThan.MarshalMsg(o)
    if err != nil {
        err = msgp.WrapError(err, "NewerThan")
        return
    }
    // string "OlderThan"
    o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
    o = msgp.AppendDuration(o, z.OlderThan)
    o, err = z.OlderThan.MarshalMsg(o)
    if err != nil {
        err = msgp.WrapError(err, "OlderThan")
        return
    }
    // string "CreatedAfter"
    o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72)
    o = msgp.AppendTime(o, z.CreatedAfter)
@@ -1619,13 +1631,13 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
        }
        switch msgp.UnsafeString(field) {
        case "NewerThan":
            z.NewerThan, bts, err = msgp.ReadDurationBytes(bts)
            bts, err = z.NewerThan.UnmarshalMsg(bts)
            if err != nil {
                err = msgp.WrapError(err, "NewerThan")
                return
            }
        case "OlderThan":
            z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
            bts, err = z.OlderThan.UnmarshalMsg(bts)
            if err != nil {
                err = msgp.WrapError(err, "OlderThan")
                return
@@ -1694,7 +1706,7 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchReplicateFilter) Msgsize() (s int) {
    s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
    s = 1 + 10 + z.NewerThan.Msgsize() + 10 + z.OlderThan.Msgsize() + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
    for za0001 := range z.Tags {
        s += z.Tags[za0001].Msgsize()
    }
182 cmd/batch-replicate_test.go (Normal file)
@@ -0,0 +1,182 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
    "slices"
    "testing"

    "gopkg.in/yaml.v3"
)

func TestParseBatchJobReplicate(t *testing.T) {
    replicateYaml := `
replicate:
  apiVersion: v1
  # source of the objects to be replicated
  source:
    type: minio # valid values are "s3" or "minio"
    bucket: mytest
    prefix: object-prefix1 # 'PREFIX' is optional
    # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
    # Either the 'source' or 'remote' *must* be the "local" deployment
    # endpoint: "http://127.0.0.1:9000"
    # # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
    # credentials:
    #   accessKey: minioadmin # Required
    #   secretKey: minioadmin # Required
    #   # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
    snowball: # automatically activated if the source is local
      disable: true # optionally turn-off snowball archive transfer
      # batch: 100 # upto this many objects per archive
      # inmemory: true # indicates if the archive must be staged locally or in-memory
      # compress: false # S2/Snappy compressed archive
      # smallerThan: 5MiB # create archive for all objects smaller than 5MiB
      # skipErrs: false # skips any source side read() errors

  # target where the objects must be replicated
  target:
    type: minio # valid values are "s3" or "minio"
    bucket: mytest
    prefix: stage # 'PREFIX' is optional
    # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted

    # Either the 'source' or 'remote' *must* be the "local" deployment
    endpoint: "http://127.0.0.1:9001"
    # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
    credentials:
      accessKey: minioadmin
      secretKey: minioadmin
      # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used

  # NOTE: All flags are optional
  # - filtering criteria only applies for all source objects match the criteria
  # - configurable notification endpoints
  # - configurable retries for the job (each retry skips successfully previously replaced objects)
  flags:
    filter:
      newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
      olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
      # createdAfter: "date" # match objects created after "date"
      # createdBefore: "date" # match objects created before "date"

      ## NOTE: tags are not supported when "source" is remote.
      tags:
        - key: "name"
          value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

      metadata:
        - key: "content-type"
          value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

    # notify:
    #   endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
    #   token: "Bearer xxxxx" # optional authentication token for the notification endpoint
    #
    # retry:
    #   attempts: 10 # number of retries for the job before giving up
    #   delay: "500ms" # least amount of delay between each retry

`
    var job BatchJobRequest
    err := yaml.Unmarshal([]byte(replicateYaml), &job)
    if err != nil {
        t.Fatal("Failed to parse batch-job-replicate yaml", err)
    }
    if !slices.Equal(job.Replicate.Source.Prefix.F(), []string{"object-prefix1"}) {
        t.Fatal("Failed to parse batch-job-replicate yaml", err)
    }
    multiPrefixReplicateYaml := `
replicate:
  apiVersion: v1
  # source of the objects to be replicated
  source:
    type: minio # valid values are "s3" or "minio"
    bucket: mytest
    prefix: # 'PREFIX' is optional
      - object-prefix1
      - object-prefix2
    # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
    # Either the 'source' or 'remote' *must* be the "local" deployment
    # endpoint: "http://127.0.0.1:9000"
    # # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
    # credentials:
    #   accessKey: minioadmin # Required
    #   secretKey: minioadmin # Required
    #   # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
    snowball: # automatically activated if the source is local
      disable: true # optionally turn-off snowball archive transfer
      # batch: 100 # upto this many objects per archive
      # inmemory: true # indicates if the archive must be staged locally or in-memory
      # compress: false # S2/Snappy compressed archive
      # smallerThan: 5MiB # create archive for all objects smaller than 5MiB
      # skipErrs: false # skips any source side read() errors

  # target where the objects must be replicated
  target:
    type: minio # valid values are "s3" or "minio"
    bucket: mytest
    prefix: stage # 'PREFIX' is optional
    # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted

    # Either the 'source' or 'remote' *must* be the "local" deployment
    endpoint: "http://127.0.0.1:9001"
    # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
    credentials:
      accessKey: minioadmin
      secretKey: minioadmin
      # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used

  # NOTE: All flags are optional
  # - filtering criteria only applies for all source objects match the criteria
  # - configurable notification endpoints
  # - configurable retries for the job (each retry skips successfully previously replaced objects)
  flags:
    filter:
      newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
      olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
      # createdAfter: "date" # match objects created after "date"
      # createdBefore: "date" # match objects created before "date"

      ## NOTE: tags are not supported when "source" is remote.
      tags:
        - key: "name"
          value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

      metadata:
        - key: "content-type"
          value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

    # notify:
    #   endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
    #   token: "Bearer xxxxx" # optional authentication token for the notification endpoint
    #
    # retry:
    #   attempts: 10 # number of retries for the job before giving up
    #   delay: "500ms" # least amount of delay between each retry

`
    var multiPrefixJob BatchJobRequest
    err = yaml.Unmarshal([]byte(multiPrefixReplicateYaml), &multiPrefixJob)
    if err != nil {
        t.Fatal("Failed to parse batch-job-replicate yaml", err)
    }
    if !slices.Equal(multiPrefixJob.Replicate.Source.Prefix.F(), []string{"object-prefix1", "object-prefix2"}) {
        t.Fatal("Failed to parse batch-job-replicate yaml")
    }
}
@@ -33,8 +33,8 @@ import (
 	"github.com/minio/minio/internal/crypto"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/kms"
-	"github.com/minio/pkg/v2/env"
-	"github.com/minio/pkg/v2/workers"
+	"github.com/minio/pkg/v3/env"
+	"github.com/minio/pkg/v3/workers"
 )

// keyrotate:

@@ -95,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
 	if e.Type == ssekms && spaces {
 		return crypto.ErrInvalidEncryptionKeyID
 	}
+
 	if e.Type == ssekms && GlobalKMS != nil {
 		ctx := kms.Context{}
 		if e.Context != "" {
@@ -113,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
 			e.kmsContext[k] = v
 		}
 		ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
-		if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
+		if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
 			return err
 		}
 	}
@@ -256,7 +257,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 		JobType:   string(job.Type()),
 		StartTime: job.Started,
 	}
-	if err := ri.load(ctx, api, job); err != nil {
+	if err := ri.loadOrInit(ctx, api, job); err != nil {
 		return err
 	}
 	if ri.Complete {
@@ -266,8 +267,12 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 	globalBatchJobsMetrics.save(job.ID, ri)
 	lastObject := ri.Object

+	retryAttempts := job.KeyRotate.Flags.Retry.Attempts
+	if retryAttempts <= 0 {
+		retryAttempts = batchKeyRotateJobDefaultRetries
+	}
 	delay := job.KeyRotate.Flags.Retry.Delay
-	if delay == 0 {
+	if delay <= 0 {
 		delay = batchKeyRotateJobDefaultRetryDelay
 	}

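Loosening delay == 0 to delay <= 0 means a negative configured delay now also falls back to the default instead of producing immediate retries. A small, hedged sketch of the same guard pattern (constant and names are illustrative):

package main

import (
	"fmt"
	"time"
)

const defaultRetryDelay = 250 * time.Millisecond

// effectiveDelay applies the same defaulting rule as the hunk above:
// zero *and* negative values are both treated as "unset".
func effectiveDelay(configured time.Duration) time.Duration {
	if configured <= 0 {
		return defaultRetryDelay
	}
	return configured
}

func main() {
	fmt.Println(effectiveDelay(0))                      // 250ms
	fmt.Println(effectiveDelay(-5 * time.Second))       // 250ms
	fmt.Println(effectiveDelay(500 * time.Millisecond)) // 500ms
}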
@@ -353,7 +358,6 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 		return err
 	}

-	retryAttempts := ri.RetryAttempts
 	ctx, cancel := context.WithCancel(ctx)

 	results := make(chan itemOrErr[ObjectInfo], 100)
@@ -388,6 +392,17 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 					stopFn(result, err)
 					batchLogIf(ctx, err)
 					success = false
+					if attempts >= retryAttempts {
+						auditOptions := AuditLogOptions{
+							Event:     "KeyRotate",
+							APIName:   "StartBatchJob",
+							Bucket:    result.Bucket,
+							Object:    result.Name,
+							VersionID: result.VersionID,
+							Error:     err.Error(),
+						}
+						auditLogInternal(ctx, auditOptions)
+					}
 				} else {
 					stopFn(result, nil)
 				}
@@ -478,8 +493,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
 		}
 	}

-	if err := r.Flags.Retry.Validate(); err != nil {
-		return err
-	}
-	return nil
+	return r.Flags.Retry.Validate()
 }

@@ -26,15 +26,17 @@ import (

 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/ioutil"
+	"github.com/minio/minio/internal/ringbuffer"
 )

 // Calculates bitrot in chunks and writes the hash into the stream.
 type streamingBitrotWriter struct {
 	iow          io.WriteCloser
-	closeWithErr func(err error) error
+	closeWithErr func(err error)
 	h            hash.Hash
 	shardSize    int64
 	canClose     *sync.WaitGroup
+	byteBuf      []byte
 }

 func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
@@ -62,7 +64,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
 }

 func (b *streamingBitrotWriter) Close() error {
+	// Close the underlying writer.
+	// This will also flush the ring buffer if used.
 	err := b.iow.Close()

 	// Wait for all data to be written before returning else it causes race conditions.
 	// Race condition is because of io.PipeWriter implementation, i.e. consider the following
 	// sequence of operations:
@@ -73,29 +78,34 @@ func (b *streamingBitrotWriter) Close() error {
 	if b.canClose != nil {
 		b.canClose.Wait()
 	}
+
+	// Recycle the buffer.
+	if b.byteBuf != nil {
+		globalBytePoolCap.Load().Put(b.byteBuf)
+		b.byteBuf = nil
+	}
 	return err
 }

 // newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
 // The output is written to the supplied writer w.
 func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
-	return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
-		// Similar to CloseWithError on pipes we always return nil.
-		return nil
-	}}
+	return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
 }

 // Returns streaming bitrot writer implementation.
 func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
-	r, w := io.Pipe()
 	h := algo.New()
+	buf := globalBytePoolCap.Load().Get()
+	rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)

 	bw := &streamingBitrotWriter{
-		iow:          ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
-		closeWithErr: w.CloseWithError,
+		iow:          ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
+		closeWithErr: rb.CloseWithError,
 		h:            h,
 		shardSize:    shardSize,
 		canClose:     &sync.WaitGroup{},
+		byteBuf:      buf,
 	}
 	bw.canClose.Add(1)
 	go func() {
@@ -106,7 +116,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri
 			bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
 			totalFileSize = bitrotSumsTotalSize + length
 		}
-		r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
+		rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
 	}()
 	return bw
 }
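Both the old io.Pipe plumbing and the new ring buffer share the same contract: a background goroutine consumes the read side and reports its outcome through CloseWithError, which the producer then observes. A minimal, self-contained sketch of that pattern using only the standard library (io.Pipe stands in for the pool-backed ring buffer; disk.CreateFile is replaced by a simple drain):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r, w := io.Pipe()

	go func() {
		// Stand-in for disk.CreateFile: consume the stream and propagate
		// the consumer's outcome to the producer via CloseWithError.
		_, err := io.Copy(io.Discard, r)
		r.CloseWithError(err)
	}()

	if _, err := io.Copy(w, strings.NewReader("shard data")); err != nil {
		fmt.Println("write failed:", err)
	}
	fmt.Println("close:", w.Close())
}

The ring buffer keeps this shape but adds an in-memory, pool-recycled buffer between producer and consumer, so writes are decoupled from the disk goroutine instead of rendezvousing on every pipe write.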
@@ -131,13 +141,7 @@ func (b *streamingBitrotReader) Close() error {
 	}
 	if closer, ok := b.rc.(io.Closer); ok {
 		// drain the body for connection reuse at network layer.
-		xhttp.DrainBody(struct {
-			io.Reader
-			io.Closer
-		}{
-			Reader: b.rc,
-			Closer: closeWrapper(func() error { return nil }),
-		})
+		xhttp.DrainBody(io.NopCloser(b.rc))
 		return closer.Close()
 	}
 	return nil

@@ -19,9 +19,13 @@ package cmd

 import (
 	"context"
+	"crypto/md5"
+	"encoding/hex"
 	"errors"
 	"fmt"
+	"io"
 	"math/rand"
+	"os"
 	"reflect"
 	"strings"
 	"sync"
@@ -30,7 +34,7 @@ import (
 	"github.com/minio/minio-go/v7/pkg/set"
 	"github.com/minio/minio/internal/grid"
 	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/env"
+	"github.com/minio/pkg/v3/env"
 )

 // To abstract a node over network.
@@ -43,10 +47,15 @@ type ServerSystemConfig struct {
 	NEndpoints int
 	CmdLines   []string
 	MinioEnv   map[string]string
+	Checksum   string
 }

 // Diff - returns error on first difference found in two configs.
 func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
+	if s1.Checksum != s2.Checksum {
+		return fmt.Errorf("Expected MinIO binary checksum: %s, seen: %s", s1.Checksum, s2.Checksum)
+	}
+
 	ns1 := s1.NEndpoints
 	ns2 := s2.NEndpoints
 	if ns1 != ns2 {
@@ -82,7 +91,7 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
 			extra = append(extra, k)
 		}
 	}
-	msg := "Expected same MINIO_ environment variables and values across all servers: "
+	msg := "Expected MINIO_* environment name and values across all servers to be same: "
 	if len(missing) > 0 {
 		msg += fmt.Sprintf(`Missing environment values: %v. `, missing)
 	}
@@ -97,14 +106,17 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
 }

 var skipEnvs = map[string]struct{}{
-	"MINIO_OPTS":          {},
-	"MINIO_CERT_PASSWD":   {},
-	"MINIO_SERVER_DEBUG":  {},
-	"MINIO_DSYNC_TRACE":   {},
-	"MINIO_ROOT_USER":     {},
-	"MINIO_ROOT_PASSWORD": {},
-	"MINIO_ACCESS_KEY":    {},
-	"MINIO_SECRET_KEY":    {},
+	"MINIO_OPTS":                   {},
+	"MINIO_CERT_PASSWD":            {},
+	"MINIO_SERVER_DEBUG":           {},
+	"MINIO_DSYNC_TRACE":            {},
+	"MINIO_ROOT_USER":              {},
+	"MINIO_ROOT_PASSWORD":          {},
+	"MINIO_ACCESS_KEY":             {},
+	"MINIO_SECRET_KEY":             {},
+	"MINIO_OPERATOR_VERSION":       {},
+	"MINIO_VSPHERE_PLUGIN_VERSION": {},
+	"MINIO_CI_CD":                  {},
 }

 func getServerSystemCfg() *ServerSystemConfig {
@@ -120,7 +132,7 @@ func getServerSystemCfg() *ServerSystemConfig {
 		}
 		envValues[envK] = logger.HashString(env.Get(envK, ""))
 	}
-	scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues}
+	scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues, Checksum: binaryChecksum}
 	var cmdLines []string
 	for _, ep := range globalEndpoints {
 		cmdLines = append(cmdLines, ep.CmdLine)
@@ -167,6 +179,26 @@ func (client *bootstrapRESTClient) String() string {
 	return client.gridConn.String()
 }

+var binaryChecksum = getBinaryChecksum()
+
+func getBinaryChecksum() string {
+	mw := md5.New()
+	binPath, err := os.Executable()
+	if err != nil {
+		logger.Error("Calculating checksum failed: %s", err)
+		return "00000000000000000000000000000000"
+	}
+	b, err := os.Open(binPath)
+	if err != nil {
+		logger.Error("Calculating checksum failed: %s", err)
+		return "00000000000000000000000000000000"
+	}
+
+	defer b.Close()
+	io.Copy(mw, b)
+	return hex.EncodeToString(mw.Sum(nil))
+}
+
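The new Checksum field lets bootstrap verification catch clusters accidentally running mixed binaries. A hedged, test-style sketch (within package cmd, using only the Diff method and fields shown above; the checksum values are made up) of what the comparison buys:

func TestDiffRejectsMismatchedBinaries(t *testing.T) {
	local := &ServerSystemConfig{NEndpoints: 4, Checksum: "abc123"}
	peer := &ServerSystemConfig{NEndpoints: 4, Checksum: "def456"}

	// The checksum comparison runs before the endpoint and environment
	// checks, so a mixed-binary cluster fails fast at bootstrap.
	if err := local.Diff(peer); err == nil {
		t.Fatal("expected checksum mismatch error, got nil")
	}
}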
 func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools, gm *grid.Manager) error {
 	srcCfg := getServerSystemCfg()
 	clnts := newBootstrapRESTClients(endpointServerPools, gm)
@@ -196,7 +228,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
 			err := clnt.Verify(ctx, srcCfg)
 			mu.Lock()
 			if err != nil {
-				bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt))
+				bootstrapTraceMsg(fmt.Sprintf("bootstrapVerify: %v, endpoint: %s", err, clnt))
 				if !isNetworkError(err) {
 					bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
 					incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))

@@ -79,6 +79,12 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
 				}
 				z.MinioEnv[za0002] = za0003
 			}
+		case "Checksum":
+			z.Checksum, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "Checksum")
+				return
+			}
 		default:
 			err = dc.Skip()
 			if err != nil {
@@ -92,9 +98,9 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 3
+	// map header, size 4
 	// write "NEndpoints"
-	err = en.Append(0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
+	err = en.Append(0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
 	if err != nil {
 		return
 	}
@@ -142,15 +148,25 @@ func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
 			return
 		}
 	}
+	// write "Checksum"
+	err = en.Append(0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Checksum)
+	if err != nil {
+		err = msgp.WrapError(err, "Checksum")
+		return
+	}
 	return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 3
+	// map header, size 4
 	// string "NEndpoints"
-	o = append(o, 0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
+	o = append(o, 0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
 	o = msgp.AppendInt(o, z.NEndpoints)
 	// string "CmdLines"
 	o = append(o, 0xa8, 0x43, 0x6d, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x73)
@@ -165,6 +181,9 @@ func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
 		o = msgp.AppendString(o, za0002)
 		o = msgp.AppendString(o, za0003)
 	}
+	// string "Checksum"
+	o = append(o, 0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
+	o = msgp.AppendString(o, z.Checksum)
 	return
 }

@@ -241,6 +260,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				}
 				z.MinioEnv[za0002] = za0003
 			}
+		case "Checksum":
+			z.Checksum, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Checksum")
+				return
+			}
 		default:
 			bts, err = msgp.Skip(bts)
 			if err != nil {
@@ -266,5 +291,6 @@ func (z *ServerSystemConfig) Msgsize() (s int) {
 			s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
 		}
 	}
+	s += 9 + msgp.StringPrefixSize + len(z.Checksum)
 	return
 }

@@ -30,7 +30,7 @@ import (
 	"github.com/minio/minio/internal/kms"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )

 const (
@@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
 	kmsKey := encConfig.KeyID()
 	if kmsKey != "" {
 		kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
-		_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
+		_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
 		if err != nil {
 			if errors.Is(err, kes.ErrKeyNotFound) {
 				writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)

@@ -28,6 +28,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"mime"
 	"mime/multipart"
 	"net/http"
 	"net/textproto"
@@ -51,9 +52,9 @@ import (
 	sse "github.com/minio/minio/internal/bucket/encryption"
 	objectlock "github.com/minio/minio/internal/bucket/object/lock"
 	"github.com/minio/minio/internal/bucket/replication"
-	"github.com/minio/minio/internal/config/cache"
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/crypto"
+	"github.com/minio/minio/internal/etag"
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/handlers"
 	"github.com/minio/minio/internal/hash"
@@ -61,8 +62,8 @@ import (
 	"github.com/minio/minio/internal/ioutil"
 	"github.com/minio/minio/internal/kms"
 	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/policy"
-	"github.com/minio/pkg/v2/sync/errgroup"
+	"github.com/minio/pkg/v3/policy"
+	"github.com/minio/pkg/v3/sync/errgroup"
 )

 const (
@@ -72,6 +73,8 @@ const (

 	xMinIOErrCodeHeader = "x-minio-error-code"
 	xMinIOErrDescHeader = "x-minio-error-desc"
+
+	postPolicyBucketTagging = "tagging"
 )

 // Check if there are buckets on server without corresponding entry in etcd backend and
@@ -90,7 +93,7 @@ const (
 // -- If IP of the entry doesn't match, this means entry is
 //
 // for another instance. Log an error to console.
-func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
+func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
 	if len(buckets) == 0 {
 		return
 	}
@@ -111,10 +114,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 	domainMissing := err == dns.ErrDomainMissing
 	if dnsBuckets != nil {
 		for _, bucket := range buckets {
-			bucketsSet.Add(bucket.Name)
-			r, ok := dnsBuckets[bucket.Name]
+			bucketsSet.Add(bucket)
+			r, ok := dnsBuckets[bucket]
 			if !ok {
-				bucketsToBeUpdated.Add(bucket.Name)
+				bucketsToBeUpdated.Add(bucket)
 				continue
 			}
 			if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
@@ -133,7 +136,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 				// but if we do see a difference with local domain IPs with
 				// hostSlice from etcd then we should update with newer
 				// domainIPs, we proceed to do that here.
-				bucketsToBeUpdated.Add(bucket.Name)
+				bucketsToBeUpdated.Add(bucket)
 				continue
 			}

@@ -142,7 +145,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 			// bucket names are globally unique in federation at a given
 			// path prefix, name collision is not allowed. We simply log
 			// an error and continue.
-			bucketsInConflict.Add(bucket.Name)
+			bucketsInConflict.Add(bucket)
 		}
 	}

@@ -227,7 +230,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 	// Generate response.
 	encodedSuccessResponse := encodeResponse(LocationResponse{})
 	// Get current region.
-	region := globalSite.Region
+	region := globalSite.Region()
 	if region != globalMinioDefaultRegion {
 		encodedSuccessResponse = encodeResponse(LocationResponse{
 			Location: region,
@@ -668,8 +671,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			continue
 		}

-		defer globalCacheConfig.Delete(bucket, dobj.ObjectName)
-
 		if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
 			// copy so we can re-add null ID.
 			dobj := dobj
@@ -888,6 +889,30 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	})
 }

+// multipartReader is just like https://pkg.go.dev/net/http#Request.MultipartReader but
+// rejects multipart/mixed as it's not supported in the S3 API.
+func multipartReader(r *http.Request) (*multipart.Reader, error) {
+	v := r.Header.Get("Content-Type")
+	if v == "" {
+		return nil, http.ErrNotMultipart
+	}
+	if r.Body == nil {
+		return nil, errors.New("missing form body")
+	}
+	d, params, err := mime.ParseMediaType(v)
+	if err != nil {
+		return nil, http.ErrNotMultipart
+	}
+	if d != "multipart/form-data" {
+		return nil, http.ErrNotMultipart
+	}
+	boundary, ok := params["boundary"]
+	if !ok {
+		return nil, http.ErrMissingBoundary
+	}
+	return multipart.NewReader(r.Body, boundary), nil
+}
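A quick, illustrative check of the helper's behavior (test-style, assuming multipartReader is in scope plus the standard net/http, net/http/httptest, strings, and testing imports): it accepts multipart/form-data and rejects multipart/mixed.

func TestMultipartReaderRejectsMixed(t *testing.T) {
	req := httptest.NewRequest(http.MethodPost, "/bucket", strings.NewReader("--x--"))
	req.Header.Set("Content-Type", "multipart/form-data; boundary=x")
	if _, err := multipartReader(req); err != nil {
		t.Fatalf("form-data should be accepted: %v", err)
	}

	// multipart/mixed parses fine as a media type, but is not form-data.
	req.Header.Set("Content-Type", "multipart/mixed; boundary=x")
	if _, err := multipartReader(req); err != http.ErrNotMultipart {
		t.Fatalf("multipart/mixed should be rejected, got %v", err)
	}
}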
|
 // PostPolicyBucketHandler - POST policy
 // ----------
 // This implementation of the POST operation handles object creation with a specified
@@ -921,9 +946,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

+	if r.ContentLength <= 0 {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL)
+		return
+	}
+
 	// Here the parameter is the size of the form data that should
 	// be loaded in memory, the remaining being put in temporary files.
-	mp, err := r.MultipartReader()
+	mp, err := multipartReader(r)
 	if err != nil {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
 		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
@@ -935,7 +965,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

 	var (
 		reader        io.Reader
-		fileSize      int64 = -1
+		actualSize    int64 = -1
 		fileName      string
 		fanOutEntries = make([]minio.PutObjectFanOutEntry, 0, 100)
 	)
@@ -943,6 +973,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	maxParts := 1000
 	// Canonicalize the form values into http.Header.
 	formValues := make(http.Header)
+	var headerLen int64
 	for {
 		part, err := mp.NextRawPart()
 		if errors.Is(err, io.EOF) {
@@ -984,7 +1015,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			return
 		}

-		var b bytes.Buffer
+		headerLen += int64(len(name)) + int64(len(fileName))
 		if name != "file" {
 			if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") {
 				dec := json.NewDecoder(part)
@@ -995,7 +1026,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				if err := dec.Decode(&m); err != nil {
 					part.Close()
 					apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
-					apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+					apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
 					writeErrorResponse(ctx, w, apiErr, r.URL)
 					return
 				}
@@ -1005,8 +1036,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				continue
 			}

+			buf := bytebufferpool.Get()
 			// value, store as string in memory
-			n, err := io.CopyN(&b, part, maxMemoryBytes+1)
+			n, err := io.CopyN(buf, part, maxMemoryBytes+1)
+			value := buf.String()
+			buf.Reset()
+			bytebufferpool.Put(buf)
 			part.Close()

 			if err != nil && err != io.EOF {
@@ -1028,7 +1063,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				writeErrorResponse(ctx, w, apiErr, r.URL)
 				return
 			}
-			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String())
+			headerLen += n
+			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], value)
 			continue
 		}
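The handler now borrows a pooled buffer per form field instead of allocating a fresh bytes.Buffer each time. A standalone sketch of that borrow/copy/snapshot/return cycle, assuming the valyala/bytebufferpool package that MinIO imports (limits and values here are illustrative):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/valyala/bytebufferpool"
)

func main() {
	// Borrow a buffer from the shared pool, copy the bounded field body
	// into it, snapshot the value as a string, then reset and return the
	// buffer so the next field reuses the same backing storage.
	buf := bytebufferpool.Get()
	n, err := io.CopyN(buf, strings.NewReader("form-value"), 1024)
	if err != nil && err != io.EOF {
		panic(err)
	}
	value := buf.String()
	buf.Reset()
	bytebufferpool.Put(buf)

	fmt.Println(n, value) // 10 form-value
}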
|
@@ -1037,6 +1073,21 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		// The file or text content must be the last field in the form.
 		// You cannot upload more than one file at a time.
 		reader = part
+
+		possibleShardSize := (r.ContentLength - headerLen)
+		if globalStorageClass.ShouldInline(possibleShardSize, false) { // keep versioned false for this check
+			var b bytes.Buffer
+			n, err := io.Copy(&b, reader)
+			if err != nil {
+				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
+				writeErrorResponse(ctx, w, apiErr, r.URL)
+				return
+			}
+			reader = &b
+			actualSize = n
+		}
+
 		// we have found the File part of the request, we are done processing multipart-form
 		break
 	}
@@ -1138,11 +1189,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

-	hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize)
+	clientETag, err := etag.FromContentMD5(formValues)
+	if err != nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
+		return
+	}
+
+	var forceMD5 []byte
+	// Optimization: if the client did not request a Content-Md5 based ETag and the object is
+	// SSE-C or SSE-KMS encrypted (or the server is started with `--no-compat`), use a UUID as the ETag.
+	kind, _ := crypto.IsRequested(formValues)
+	if !etag.ContentMD5Requested(formValues) && (kind == crypto.SSEC || kind == crypto.S3KMS || !globalServerCtxt.StrictS3Compat) {
+		forceMD5 = mustGetUUIDBytes()
+	}
+
+	hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{
+		Size:       actualSize,
+		MD5Hex:     clientETag.String(),
+		SHA256Hex:  "",
+		ActualSize: actualSize,
+		DisableMD5: false,
+		ForceMD5:   forceMD5,
+	})
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}

 	if checksum != nil && checksum.Valid() {
 		if err = hashReader.AddChecksumNoTrailer(formValues, false); err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1199,9 +1272,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
 		return
 	}
-	opts.WantChecksum = checksum

+	fanOutOpts := fanOutOptions{Checksum: checksum}

 	if crypto.Requested(formValues) {
 		if crypto.SSECopy.IsRequested(r.Header) {
 			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
@@ -1246,8 +1319,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
+
+		wantSize := int64(-1)
+		if actualSize >= 0 {
+			info := ObjectInfo{Size: actualSize}
+			wantSize = info.EncryptedSize()
+		}
+
 		// do not try to verify encrypted content/
-		hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1)
+		hashReader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
@@ -1327,7 +1407,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				Key:   objInfo.Name,
 				Error: errs[i].Error(),
 			})
-
 			eventArgsList = append(eventArgsList, eventArgs{
 				EventName:  event.ObjectCreatedPost,
 				BucketName: objInfo.Bucket,
@@ -1340,22 +1419,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			continue
 		}

-		asize, err := objInfo.GetActualSize()
-		if err != nil {
-			asize = objInfo.Size
-		}
-
-		globalCacheConfig.Set(&cache.ObjectInfo{
-			Key:          objInfo.Name,
-			Bucket:       objInfo.Bucket,
-			ETag:         getDecryptedETag(formValues, objInfo, false),
-			ModTime:      objInfo.ModTime,
-			Expires:      objInfo.Expires.UTC().Format(http.TimeFormat),
-			CacheControl: objInfo.CacheControl,
-			Metadata:     cleanReservedKeys(objInfo.UserDefined),
-			Size:         asize,
-		})
-
 		fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{
 			Key:  objInfo.Name,
 			ETag: getDecryptedETag(formValues, objInfo, false),
@@ -1415,6 +1478,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

+	if formValues.Get(postPolicyBucketTagging) != "" {
+		tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
+		if err != nil {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
+			return
+		}
+		tagsStr := tags.String()
+		opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
+	} else {
+		// avoid the user setting an invalid tag using `X-Amz-Tagging`
+		delete(opts.UserDefined, xhttp.AmzObjectTagging)
+	}
+
 	objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1437,22 +1513,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		w.Header().Set(xhttp.Location, obj)
 	}

-	asize, err := objInfo.GetActualSize()
-	if err != nil {
-		asize = objInfo.Size
-	}
-
-	defer globalCacheConfig.Set(&cache.ObjectInfo{
-		Key:          objInfo.Name,
-		Bucket:       objInfo.Bucket,
-		ETag:         etag,
-		ModTime:      objInfo.ModTime,
-		Expires:      objInfo.ExpiresStr(),
-		CacheControl: objInfo.CacheControl,
-		Metadata:     cleanReservedKeys(objInfo.UserDefined),
-		Size:         asize,
-	})
-
 	// Notify object created event.
 	defer sendEvent(eventArgs{
 		EventName: event.ObjectCreatedPost,
@@ -1700,7 +1760,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 	}

 	globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
-	globalReplicationPool.deleteResyncMetadata(ctx, bucket)
+	globalReplicationPool.Get().deleteResyncMetadata(ctx, bucket)

 	// Call site replication hook.
 	replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))

@@ -32,7 +32,7 @@ import (

 // Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestRemoveBucketHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testRemoveBucketHandler, endpoints: []string{"RemoveBucket"}})
 }

 func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -78,7 +78,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a

 // Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestGetBucketLocationHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketLocationHandler, endpoints: []string{"GetBucketLocation"}})
 }

 func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -220,7 +220,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri

 // Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestHeadBucketHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testHeadBucketHandler, endpoints: []string{"HeadBucket"}})
 }

 func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -322,7 +322,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

 // Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
 func TestListMultipartUploadsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListMultipartUploadsHandler, endpoints: []string{"ListMultipartUploads"}})
 }

 // testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
@@ -558,7 +558,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

 // Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
 func TestListBucketsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListBucketsHandler, endpoints: []string{"ListBuckets"}})
 }

 // testListBucketsHandler - Tests validate listing of buckets.
@@ -649,7 +649,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

 // Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteMultipleObjectsHandler, endpoints: []string{"DeleteMultipleObjects", "PutBucketPolicy"}})
 }

 func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,

@@ -17,7 +17,11 @@

 package cmd

-import "github.com/minio/minio/internal/bucket/lifecycle"
+import (
+	"strconv"
+
+	"github.com/minio/minio/internal/bucket/lifecycle"
+)

 //go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
 type lcEventSrc uint8
@@ -43,7 +47,7 @@ type lcAuditEvent struct {
 	source lcEventSrc
 }

-func (lae lcAuditEvent) Tags() map[string]interface{} {
+func (lae lcAuditEvent) Tags() map[string]string {
 	event := lae.Event
 	src := lae.source
 	const (
@@ -55,7 +59,7 @@ func (lae lcAuditEvent) Tags() map[string]string {
 		ilmNewerNoncurrentVersions = "ilm-newer-noncurrent-versions"
 		ilmNoncurrentDays          = "ilm-noncurrent-days"
 	)
-	tags := make(map[string]interface{}, 5)
+	tags := make(map[string]string, 5)
 	if src > lcEventSrc_None {
 		tags[ilmSrc] = src.String()
 	}
@@ -63,7 +67,7 @@ func (lae lcAuditEvent) Tags() map[string]string {
 	tags[ilmRuleID] = event.RuleID

 	if !event.Due.IsZero() {
-		tags[ilmDue] = event.Due
+		tags[ilmDue] = event.Due.Format(iso8601Format)
 	}

 	// rule with Transition/NoncurrentVersionTransition in effect
@@ -73,10 +77,10 @@ func (lae lcAuditEvent) Tags() map[string]string {

 	// rule with NewerNoncurrentVersions in effect
 	if event.NewerNoncurrentVersions > 0 {
-		tags[ilmNewerNoncurrentVersions] = event.NewerNoncurrentVersions
+		tags[ilmNewerNoncurrentVersions] = strconv.Itoa(event.NewerNoncurrentVersions)
 	}
 	if event.NoncurrentDays > 0 {
-		tags[ilmNoncurrentDays] = event.NoncurrentDays
+		tags[ilmNoncurrentDays] = strconv.Itoa(event.NoncurrentDays)
 	}
 	return tags
 }
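Moving Tags() from map[string]interface{} to map[string]string forces the conversions to happen at the call site rather than through late, reflection-based formatting in the logger. A minimal illustration of the conversions the typed map now requires (values are hypothetical; MinIO's iso8601Format is approximated here by time.RFC3339):

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	due := time.Now().Add(24 * time.Hour)
	tags := make(map[string]string, 3)
	tags["ilm-due"] = due.Format(time.RFC3339)   // times formatted explicitly
	tags["ilm-noncurrent-days"] = strconv.Itoa(30) // integers converted explicitly
	fmt.Println(tags)
}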
|
@@ -28,7 +28,7 @@ import (
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )

 const (
@@ -64,7 +64,8 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	}

 	// Check if bucket exists.
-	if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
+	rcfg, err := globalBucketObjectLockSys.Get(bucket)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	}

 	// Validate the received bucket lifecycle document
-	if err = bucketLifecycle.Validate(); err != nil {
+	if err = bucketLifecycle.Validate(rcfg); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}

@@ -29,7 +29,7 @@ import (

 // Test S3 Bucket lifecycle APIs with wrong credentials
 func TestBucketLifecycleWrongCredentials(t *testing.T) {
-	ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlersWrongCredentials, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
 }

 // Test for authentication
@@ -145,7 +145,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,

 // Test S3 Bucket lifecycle APIs
 func TestBucketLifecycle(t *testing.T) {
-	ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlers, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
 }

 // Simple tests of bucket lifecycle: PUT, GET, DELETE.

@@ -40,7 +40,7 @@ import (
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/s3select"
-	xnet "github.com/minio/pkg/v2/net"
+	xnet "github.com/minio/pkg/v3/net"
 	"github.com/zeebo/xxh3"
 )

@@ -71,7 +71,8 @@ func NewLifecycleSys() *LifecycleSys {
 	return &LifecycleSys{}
 }

-func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string) madmin.TraceInfo {
+func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string, metadata map[string]string, err string) madmin.TraceInfo {
+	sz, _ := oi.GetActualSize()
 	return madmin.TraceInfo{
 		TraceType: madmin.TraceILM,
 		Time:      startTime,
@@ -79,18 +80,23 @@ func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event
 		FuncName:  event,
 		Duration:  duration,
 		Path:      pathJoin(oi.Bucket, oi.Name),
-		Error:     "",
+		Bytes:     sz,
+		Error:     err,
 		Message:   getSource(4),
-		Custom:    map[string]string{"version-id": oi.VersionID},
+		Custom:    metadata,
 	}
 }

-func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string) {
+func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string, metadata map[string]string, err error) {
 	startTime := time.Now()
-	return func(event string) {
+	return func(event string, metadata map[string]string, err error) {
 		duration := time.Since(startTime)
 		if globalTrace.NumSubscribers(madmin.TraceILM) > 0 {
-			globalTrace.Publish(ilmTrace(startTime, duration, oi, event))
+			e := ""
+			if err != nil {
+				e = err.Error()
+			}
+			globalTrace.Publish(ilmTrace(startTime, duration, oi, event, metadata, e))
 		}
 	}
}
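The trace helper is a start-time closure: capture time.Now() once, then report duration plus outcome when the traced operation completes. A self-contained sketch of the same pattern (names and output are illustrative, not MinIO's tracing subsystem):

package main

import (
	"fmt"
	"time"
)

// traceSpan mirrors the closure above: capture the start time once,
// then report duration, metadata, and error when the work finishes.
func traceSpan(name string) func(metadata map[string]string, err error) {
	start := time.Now()
	return func(metadata map[string]string, err error) {
		fmt.Printf("%s took %v meta=%v err=%v\n", name, time.Since(start), metadata, err)
	}
}

func main() {
	done := traceSpan("ilm-expiry")
	time.Sleep(10 * time.Millisecond) // stand-in for the traced work
	done(map[string]string{"version-id": "null"}, nil)
}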
@@ -277,6 +283,10 @@ func (es *expiryState) getWorkerCh(h uint64) chan<- expiryOp {
 }

 func (es *expiryState) ResizeWorkers(n int) {
+	if n == 0 {
+		n = 100
+	}
+
 	// Lock to avoid multiple resizes to happen at the same time.
 	es.mu.Lock()
 	defer es.mu.Unlock()
@@ -342,7 +352,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
 			traceFn := globalLifecycleSys.trace(oi)
 			if !oi.TransitionedObject.FreeVersion {
 				// nothing to be done
-				return
+				continue
 			}

 			ignoreNotFoundErr := func(err error) error {
@@ -356,7 +366,8 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
 			err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
 			if ignoreNotFoundErr(err) != nil {
 				transitionLogIf(es.ctx, err)
-				return
+				traceFn(ILMFreeVersionDelete, nil, err)
+				continue
 			}

 			// Remove this free version
@@ -538,6 +549,10 @@ func (t *transitionState) UpdateWorkers(n int) {
 }

 func (t *transitionState) updateWorkers(n int) {
+	if n == 0 {
+		n = 100
+	}
+
 	for t.numWorkers < n {
 		go t.worker(t.objAPI)
 		t.numWorkers++
@@ -573,6 +588,10 @@ func enqueueTransitionImmediate(obj ObjectInfo, src lcEventSrc) {
 	if lc, err := globalLifecycleSys.Get(obj.Bucket); err == nil {
 		switch event := lc.Eval(obj.ToLifecycleOpts()); event.Action {
 		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
+			if obj.DeleteMarker || obj.IsDir {
+				// nothing to transition
+				return
+			}
 			globalTransitionState.queueTransitionTask(obj, event, src)
 		}
 	}
@@ -663,11 +682,12 @@ func genTransitionObjName(bucket string) (string, error) {
 // is moved to the transition tier. Note that in the case of encrypted objects, the entire encrypted stream is moved
 // to the transition tier without decrypting or re-encrypting.
 func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, lae lcAuditEvent) (err error) {
+	timeILM := globalScannerMetrics.timeILM(lae.Action)
 	defer func() {
 		if err != nil {
 			return
 		}
-		globalScannerMetrics.timeILM(lae.Action)(1)
+		timeILM(1)
 	}()

 	opts := ObjectOptions{
@@ -692,6 +712,11 @@ type auditTierOp struct {
 	Error            string `json:"error,omitempty"`
 }

+func (op auditTierOp) String() string {
+	// flattening the auditTierOp{} for audit
+	return fmt.Sprintf("tier:%s,respNS:%d,tx:%d,err:%s", op.Tier, op.TimeToResponseNS, op.OutputBytes, op.Error)
+}
+
 func auditTierActions(ctx context.Context, tier string, bytes int64) func(err error) {
 	startTime := time.Now()
 	return func(err error) {
@@ -715,7 +740,7 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er
 			globalTierMetrics.logFailure(tier)
 		}

-		logger.GetReqInfo(ctx).AppendTags("tierStats", op)
+		logger.GetReqInfo(ctx).AppendTags("tierStats", op.String())
 	}
 }

@@ -726,7 +751,7 @@ func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs
 		return nil, fmt.Errorf("transition storage class not configured: %w", err)
 	}

-	fn, off, length, err := NewGetObjectReader(rs, oi, opts)
+	fn, off, length, err := NewGetObjectReader(rs, oi, opts, h)
 	if err != nil {
 		return nil, ErrorRespToObjectError(err, bucket, object)
 	}
@@ -983,7 +1008,7 @@ func ongoingRestoreObj() restoreObjStatus {
 	}
 }

-// completeRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
+// completedRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
 func completedRestoreObj(expiry time.Time) restoreObjStatus {
 	return restoreObjStatus{
 		ongoing: false,

@@ -26,7 +26,7 @@ import (
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/mux"

-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )

 // Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -124,7 +124,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
-	response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
+	response := generateListVersionsResponse(ctx, bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)

 	// Write success response.
 	writeSuccessResponseXML(w, encodeResponseList(response))
@@ -202,7 +202,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re

 	if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(prefix, archivePattern) {
 		// Initiate a list objects operation inside a zip file based on the input params
-		listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
+		listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, startAfter, r.Header)
 	} else {
 		// Initiate a list objects operation based on the input params.
 		// On success would return back ListObjectsInfo object to be
@@ -219,7 +219,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
 		return
 	}

-	response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
+	response := generateListObjectsV2Response(ctx, bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
 		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
 		maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, checkObjMeta)

@@ -231,7 +231,7 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) {
 	if token == "" {
 		return token, -1
 	}
-	i := strings.Index(token, ":")
+	i := strings.Index(token, getKeySeparator())
 	if i < 0 {
 		return token, -1
 	}
@@ -318,7 +318,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 		return
 	}

-	response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
+	response := generateListObjectsV1Response(ctx, bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

 	// Write success response.
 	writeSuccessResponseXML(w, encodeResponseList(response))

@@ -37,8 +37,9 @@ import (
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/kms"
 	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/policy"
-	"github.com/minio/pkg/v2/sync/errgroup"
+	"github.com/minio/pkg/v3/policy"
+	"github.com/minio/pkg/v3/sync/errgroup"
+	"golang.org/x/sync/singleflight"
 )

 // BucketMetadataSys captures all bucket metadata for a given cluster.
@@ -47,6 +48,7 @@ type BucketMetadataSys struct {

 	sync.RWMutex
 	initialized bool
+	group       *singleflight.Group
 	metadataMap map[string]BucketMetadata
 }

@@ -62,6 +64,7 @@ func (sys *BucketMetadataSys) Count() int {
 func (sys *BucketMetadataSys) Remove(buckets ...string) {
 	sys.Lock()
 	for _, bucket := range buckets {
+		sys.group.Forget(bucket)
 		delete(sys.metadataMap, bucket)
 		globalBucketMonitor.DeleteBucket(bucket)
 	}
@@ -121,6 +124,7 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string,
 		meta.PolicyConfigUpdatedAt = updatedAt
 	case bucketNotificationConfig:
 		meta.NotificationConfigXML = configData
+		meta.NotificationConfigUpdatedAt = updatedAt
 	case bucketLifecycleConfig:
 		meta.LifecycleConfigXML = configData
 		meta.LifecycleConfigUpdatedAt = updatedAt
@@ -150,12 +154,13 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string,
 		if err != nil {
 			return updatedAt, fmt.Errorf("Error encrypting bucket target metadata %w", err)
 		}
+		meta.BucketTargetsConfigUpdatedAt = updatedAt
+		meta.BucketTargetsConfigMetaUpdatedAt = updatedAt
 	default:
 		return updatedAt, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
 	}

-	err = sys.save(ctx, meta)
-	return updatedAt, err
+	return updatedAt, sys.save(ctx, meta)
 }

 func (sys *BucketMetadataSys) save(ctx context.Context, meta BucketMetadata) error {
@@ -262,6 +267,21 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve
 	return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil
 }

+// GetBucketPolicy returns configured bucket policy
+func (sys *BucketMetadataSys) GetBucketPolicy(bucket string) (*policy.BucketPolicy, time.Time, error) {
+	meta, _, err := sys.GetConfig(GlobalContext, bucket)
+	if err != nil {
+		if errors.Is(err, errConfigNotFound) {
+			return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
+		}
+		return nil, time.Time{}, err
+	}
+	if meta.policyConfig == nil {
+		return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
+	}
+	return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil
+}
+
 // GetTaggingConfig returns configured tagging config
 // The returned object may not be modified.
 func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
@@ -392,9 +412,7 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s
 		return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
 	}
 	if reloaded {
-		globalBucketTargetSys.set(BucketInfo{
-			Name: bucket,
-		}, meta)
+		globalBucketTargetSys.set(bucket, meta)
 	}
 	return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil
 }
@@ -413,9 +431,7 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
 		return nil, BucketRemoteTargetNotFound{Bucket: bucket}
 	}
 	if reloaded {
-		globalBucketTargetSys.set(BucketInfo{
-			Name: bucket,
-		}, meta)
+		globalBucketTargetSys.set(bucket, meta)
 	}
 	return meta.bucketTargetConfig, nil
 }
@@ -455,13 +471,20 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
 	if ok {
 		return meta, reloaded, nil
 	}
-	meta, err = loadBucketMetadata(ctx, objAPI, bucket)
-	if err != nil {
-		if !sys.Initialized() {
-			// bucket metadata not yet initialized
-			return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized
+
+	val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
+		meta, err = loadBucketMetadata(ctx, objAPI, bucket)
+		if err != nil {
+			if !sys.Initialized() {
+				// bucket metadata not yet initialized
+				return newBucketMetadata(bucket), errBucketMetadataNotInitialized
+			}
 		}
-	}
-	return meta, reloaded, err
+		return meta, err
+	})
+	meta, _ = val.(BucketMetadata)
+	if err != nil {
+		return meta, false, err
+	}
 	sys.Lock()
 	sys.metadataMap[bucket] = meta
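Wrapping the load in a singleflight group deduplicates concurrent cache misses: many callers asking for the same bucket at once trigger exactly one loadBucketMetadata and share its result. A standalone demonstration of golang.org/x/sync/singleflight (the sleep stands in for the actual metadata load):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

var loads atomic.Int64

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	// Ten goroutines ask for the same bucket's metadata at once; the
	// group collapses them into a single underlying load.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _, _ := g.Do("mybucket", func() (interface{}, error) {
				loads.Add(1)
				time.Sleep(50 * time.Millisecond) // stand-in for loadBucketMetadata
				return "metadata", nil
			})
			_ = v
		}()
	}
	wg.Wait()
	fmt.Println("underlying loads:", loads.Load()) // typically 1
}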
@@ -471,7 +494,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
|
||||
}
|
||||
|
||||
// Init - initializes bucket metadata system for all buckets.
|
||||
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []string, objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
@@ -484,7 +507,7 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
|
||||
}
|
||||
|
||||
// concurrently load bucket metadata to speed up loading bucket metadata.
|
||||
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, failedBuckets map[string]struct{}) {
|
||||
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []string) {
|
||||
g := errgroup.WithNErrs(len(buckets))
|
||||
bucketMetas := make([]BucketMetadata, len(buckets))
|
||||
for index := range buckets {
|
||||
@@ -494,8 +517,8 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
// herd upon start up sequence.
|
||||
time.Sleep(25*time.Millisecond + time.Duration(rand.Int63n(int64(100*time.Millisecond))))
|
||||
|
||||
_, _ = sys.objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true})
|
||||
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index].Name)
|
||||
_, _ = sys.objAPI.HealBucket(ctx, buckets[index], madmin.HealOpts{Recreate: true})
|
||||
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -508,7 +531,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
for index, err := range errs {
|
||||
if err != nil {
|
||||
internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err),
|
||||
"load-bucket-metadata-"+buckets[index].Name, logger.WarningKind)
|
||||
"load-bucket-metadata-"+buckets[index], logger.WarningKind)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -519,16 +542,12 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
if errs[i] != nil {
|
||||
continue
|
||||
}
|
||||
sys.metadataMap[buckets[i].Name] = meta
|
||||
sys.metadataMap[buckets[i]] = meta
|
||||
}
|
||||
sys.Unlock()
|
||||
|
||||
for i, meta := range bucketMetas {
|
||||
if errs[i] != nil {
|
||||
if failedBuckets == nil {
|
||||
failedBuckets = make(map[string]struct{})
|
||||
}
|
||||
failedBuckets[buckets[i].Name] = struct{}{}
|
||||
continue
|
||||
}
|
||||
globalEventNotifier.set(buckets[i], meta) // set notification targets
|
||||
@@ -536,7 +555,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
}
|
||||
}
|
||||
|
||||
func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, failedBuckets map[string]struct{}) {
|
||||
func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
|
||||
const bucketMetadataRefresh = 15 * time.Minute
|
||||
|
||||
sleeper := newDynamicSleeper(2, 150*time.Millisecond, false)
|
||||
@@ -548,7 +567,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
|
||||
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{NoMetadata: true})
|
||||
if err != nil {
|
||||
internalLogIf(ctx, err, logger.WarningKind)
|
||||
break
|
||||
@@ -566,7 +585,10 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
|
||||
for i := range buckets {
|
||||
wait := sleeper.Timer(ctx)
|
||||
|
||||
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
|
||||
bucket := buckets[i].Name
|
||||
updated := false
|
||||
|
||||
meta, err := loadBucketMetadata(ctx, sys.objAPI, bucket)
|
||||
if err != nil {
|
||||
internalLogIf(ctx, err, logger.WarningKind)
|
||||
wait() // wait to proceed to next entry.
|
||||
@@ -574,14 +596,16 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
sys.metadataMap[buckets[i].Name] = meta
|
||||
// Update if the bucket metadata in the memory is older than on-disk one
|
||||
if lu := sys.metadataMap[bucket].lastUpdate(); lu.Before(meta.lastUpdate()) {
|
||||
updated = true
|
||||
sys.metadataMap[bucket] = meta
|
||||
}
|
||||
sys.Unlock()
|
||||
|
||||
// Initialize the failed buckets
|
||||
if _, ok := failedBuckets[buckets[i].Name]; ok {
|
||||
globalEventNotifier.set(buckets[i], meta)
|
||||
globalBucketTargetSys.set(buckets[i], meta)
|
||||
delete(failedBuckets, buckets[i].Name)
|
||||
if updated {
|
||||
globalEventNotifier.set(bucket, meta)
|
||||
globalBucketTargetSys.set(bucket, meta)
|
||||
}
|
||||
|
||||
wait() // wait to proceed to next entry.
|
||||
@@ -600,15 +624,14 @@ func (sys *BucketMetadataSys) Initialized() bool {
 }
 
 // Loads bucket metadata for all buckets into BucketMetadataSys.
-func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
-    count := 100 // load 100 bucket metadata at a time.
-    failedBuckets := make(map[string]struct{})
+func (sys *BucketMetadataSys) init(ctx context.Context, buckets []string) {
+    count := globalEndpoints.ESCount() * 10
     for {
         if len(buckets) < count {
-            sys.concurrentLoad(ctx, buckets, failedBuckets)
+            sys.concurrentLoad(ctx, buckets)
             break
         }
-        sys.concurrentLoad(ctx, buckets[:count], failedBuckets)
+        sys.concurrentLoad(ctx, buckets[:count])
         buckets = buckets[count:]
     }
 
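
Note: init now derives its batch size from the deployment's erasure-set count rather than a fixed 100, so larger clusters load metadata in proportionally larger windows. The chunking itself is the usual slice-window loop; a generic sketch, where load is a placeholder for concurrentLoad and count is assumed positive:

    package sketch

    // loadInBatches walks buckets in windows of count entries, mirroring
    // the init loop above; the remainder (fewer than count entries) is
    // loaded in one final call before breaking out.
    func loadInBatches(buckets []string, count int, load func([]string)) {
        for {
            if len(buckets) < count {
                load(buckets)
                break
            }
            load(buckets[:count])
            buckets = buckets[count:]
        }
    }
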
@@ -617,7 +640,7 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
     sys.Unlock()
 
     if globalIsDistErasure {
-        go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
+        go sys.refreshBucketsMetadataLoop(ctx)
     }
 }

@@ -634,5 +657,6 @@ func (sys *BucketMetadataSys) Reset() {
 func NewBucketMetadataSys() *BucketMetadataSys {
     return &BucketMetadataSys{
         metadataMap: make(map[string]BucketMetadata),
+        group:       &singleflight.Group{},
     }
 }

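Note: the constructor now seeds a singleflight.Group. The hunk only shows the field being initialized, not its call sites, but the usual purpose is to collapse concurrent loads of the same bucket's metadata into one backend read. A minimal sketch with golang.org/x/sync/singleflight; the getMetadata stand-in below is hypothetical, not the server's method:

    package main

    import (
        "fmt"

        "golang.org/x/sync/singleflight"
    )

    var group singleflight.Group

    // getMetadata deduplicates concurrent lookups for the same bucket:
    // callers that arrive while a load is in flight share its result
    // instead of issuing their own backend read.
    func getMetadata(bucket string) (string, error) {
        v, err, _ := group.Do(bucket, func() (interface{}, error) {
            // placeholder for the expensive backend load
            return "metadata-for-" + bucket, nil
        })
        if err != nil {
            return "", err
        }
        return v.(string), nil
    }

    func main() {
        m, _ := getMetadata("photos")
        fmt.Println(m)
    }
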
@@ -41,7 +41,7 @@ import (
     "github.com/minio/minio/internal/fips"
     "github.com/minio/minio/internal/kms"
     "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
     "github.com/minio/sio"
 )

@@ -81,14 +81,19 @@ type BucketMetadata struct {
     ReplicationConfigXML        []byte
     BucketTargetsConfigJSON     []byte
     BucketTargetsConfigMetaJSON []byte
-    PolicyConfigUpdatedAt       time.Time
-    ObjectLockConfigUpdatedAt   time.Time
-    EncryptionConfigUpdatedAt   time.Time
-    TaggingConfigUpdatedAt      time.Time
-    QuotaConfigUpdatedAt        time.Time
-    ReplicationConfigUpdatedAt  time.Time
-    VersioningConfigUpdatedAt   time.Time
-    LifecycleConfigUpdatedAt    time.Time
+
+    PolicyConfigUpdatedAt            time.Time
+    ObjectLockConfigUpdatedAt        time.Time
+    EncryptionConfigUpdatedAt        time.Time
+    TaggingConfigUpdatedAt           time.Time
+    QuotaConfigUpdatedAt             time.Time
+    ReplicationConfigUpdatedAt       time.Time
+    VersioningConfigUpdatedAt        time.Time
+    LifecycleConfigUpdatedAt         time.Time
+    NotificationConfigUpdatedAt      time.Time
+    BucketTargetsConfigUpdatedAt     time.Time
+    BucketTargetsConfigMetaUpdatedAt time.Time
+    // Add a new UpdatedAt field and update lastUpdate function
 
     // Unexported fields. Must be updated atomically.
     policyConfig *policy.BucketPolicy

@@ -120,6 +125,46 @@ func newBucketMetadata(name string) BucketMetadata {
     }
 }
 
+// Return the last update of this bucket metadata, which
+// means, the last update of any policy document.
+func (b BucketMetadata) lastUpdate() (t time.Time) {
+    if b.PolicyConfigUpdatedAt.After(t) {
+        t = b.PolicyConfigUpdatedAt
+    }
+    if b.ObjectLockConfigUpdatedAt.After(t) {
+        t = b.ObjectLockConfigUpdatedAt
+    }
+    if b.EncryptionConfigUpdatedAt.After(t) {
+        t = b.EncryptionConfigUpdatedAt
+    }
+    if b.TaggingConfigUpdatedAt.After(t) {
+        t = b.TaggingConfigUpdatedAt
+    }
+    if b.QuotaConfigUpdatedAt.After(t) {
+        t = b.QuotaConfigUpdatedAt
+    }
+    if b.ReplicationConfigUpdatedAt.After(t) {
+        t = b.ReplicationConfigUpdatedAt
+    }
+    if b.VersioningConfigUpdatedAt.After(t) {
+        t = b.VersioningConfigUpdatedAt
+    }
+    if b.LifecycleConfigUpdatedAt.After(t) {
+        t = b.LifecycleConfigUpdatedAt
+    }
+    if b.NotificationConfigUpdatedAt.After(t) {
+        t = b.NotificationConfigUpdatedAt
+    }
+    if b.BucketTargetsConfigUpdatedAt.After(t) {
+        t = b.BucketTargetsConfigUpdatedAt
+    }
+    if b.BucketTargetsConfigMetaUpdatedAt.After(t) {
+        t = b.BucketTargetsConfigMetaUpdatedAt
+    }
+
+    return
+}
+
 // Versioning returns true if versioning is enabled
 func (b BucketMetadata) Versioning() bool {
     return b.LockEnabled || (b.versioningConfig != nil && b.versioningConfig.Enabled()) || (b.objectLockConfig != nil && b.objectLockConfig.Enabled())
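
Note: lastUpdate is a plain maximum over every *UpdatedAt field, and the struct comment above ("Add a new UpdatedAt field and update lastUpdate function") exists because a timestamp that is not folded in here would silently stop triggering refreshes for its config. An equivalent variadic fold, shown only to make that invariant explicit (illustrative, not the struct's actual method):

    package sketch

    import "time"

    // maxTime returns the latest of the given timestamps; the zero
    // time.Time is the identity element, matching lastUpdate's behavior.
    func maxTime(stamps ...time.Time) (t time.Time) {
        for _, s := range stamps {
            if s.After(t) {
                t = s
            }
        }
        return t
    }
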
@@ -440,6 +485,18 @@ func (b *BucketMetadata) defaultTimestamps() {
     if b.LifecycleConfigUpdatedAt.IsZero() {
         b.LifecycleConfigUpdatedAt = b.Created
     }
+
+    if b.NotificationConfigUpdatedAt.IsZero() {
+        b.NotificationConfigUpdatedAt = b.Created
+    }
+
+    if b.BucketTargetsConfigUpdatedAt.IsZero() {
+        b.BucketTargetsConfigUpdatedAt = b.Created
+    }
+
+    if b.BucketTargetsConfigMetaUpdatedAt.IsZero() {
+        b.BucketTargetsConfigMetaUpdatedAt = b.Created
+    }
 }
 
 // Save config to supplied ObjectLayer api.

@@ -490,7 +547,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
     }
 
     metadata := make(map[string]string)
-    key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext)
+    key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
     if err != nil {
         return
     }

@@ -519,7 +576,11 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
     if err != nil {
         return nil, err
     }
-    extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext)
+    extKey, err := GlobalKMS.Decrypt(context.TODO(), &kms.DecryptRequest{
+        Name:           keyID,
+        Ciphertext:     kmsKey,
+        AssociatedData: kmsContext,
+    })
     if err != nil {
         return nil, err
     }

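Note: both KMS call sites move from positional arguments to request structs, which lets GenerateKey and Decrypt grow fields without breaking callers. The envelope scheme itself is unchanged: generate a data key bound to associated data, seal the payload with the plaintext half, persist only the ciphertext half. A schematic of that shape against hypothetical types (this is not MinIO's internal/kms API):

    package sketch

    import "context"

    type GenerateKeyRequest struct{ AssociatedData []byte }

    type DecryptRequest struct {
        Name           string // key ID the ciphertext was produced under
        Ciphertext     []byte
        AssociatedData []byte
    }

    // DEK is a data-encryption key: the plaintext half seals the payload
    // and is never stored; the ciphertext half is persisted beside it.
    type DEK struct {
        KeyID      string
        Plaintext  []byte
        Ciphertext []byte
    }

    type KeyManager interface {
        GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error)
        Decrypt(ctx context.Context, req *DecryptRequest) ([]byte, error)
    }

Binding AssociatedData to the bucket-derived context means a sealed config copied under another bucket name should fail to decrypt, assuming the KMS authenticates that data the way AEAD modes do.
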
@@ -156,6 +156,24 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
                 err = msgp.WrapError(err, "LifecycleConfigUpdatedAt")
                 return
             }
+        case "NotificationConfigUpdatedAt":
+            z.NotificationConfigUpdatedAt, err = dc.ReadTime()
+            if err != nil {
+                err = msgp.WrapError(err, "NotificationConfigUpdatedAt")
+                return
+            }
+        case "BucketTargetsConfigUpdatedAt":
+            z.BucketTargetsConfigUpdatedAt, err = dc.ReadTime()
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt")
+                return
+            }
+        case "BucketTargetsConfigMetaUpdatedAt":
+            z.BucketTargetsConfigMetaUpdatedAt, err = dc.ReadTime()
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt")
+                return
+            }
         default:
             err = dc.Skip()
             if err != nil {

@@ -169,9 +187,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
 
 // EncodeMsg implements msgp.Encodable
 func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
-    // map header, size 22
+    // map header, size 25
     // write "Name"
-    err = en.Append(0xde, 0x0, 0x16, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
+    err = en.Append(0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
     if err != nil {
         return
     }

@@ -390,15 +408,45 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
         err = msgp.WrapError(err, "LifecycleConfigUpdatedAt")
         return
     }
+    // write "NotificationConfigUpdatedAt"
+    err = en.Append(0xbb, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+    if err != nil {
+        return
+    }
+    err = en.WriteTime(z.NotificationConfigUpdatedAt)
+    if err != nil {
+        err = msgp.WrapError(err, "NotificationConfigUpdatedAt")
+        return
+    }
+    // write "BucketTargetsConfigUpdatedAt"
+    err = en.Append(0xbc, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+    if err != nil {
+        return
+    }
+    err = en.WriteTime(z.BucketTargetsConfigUpdatedAt)
+    if err != nil {
+        err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt")
+        return
+    }
+    // write "BucketTargetsConfigMetaUpdatedAt"
+    err = en.Append(0xd9, 0x20, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+    if err != nil {
+        return
+    }
+    err = en.WriteTime(z.BucketTargetsConfigMetaUpdatedAt)
+    if err != nil {
+        err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt")
+        return
+    }
     return
 }
 
 // MarshalMsg implements msgp.Marshaler
 func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
     o = msgp.Require(b, z.Msgsize())
-    // map header, size 22
+    // map header, size 25
     // string "Name"
-    o = append(o, 0xde, 0x0, 0x16, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
+    o = append(o, 0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
     o = msgp.AppendString(o, z.Name)
     // string "Created"
     o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)

@@ -463,6 +511,15 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
     // string "LifecycleConfigUpdatedAt"
     o = append(o, 0xb8, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
     o = msgp.AppendTime(o, z.LifecycleConfigUpdatedAt)
+    // string "NotificationConfigUpdatedAt"
+    o = append(o, 0xbb, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+    o = msgp.AppendTime(o, z.NotificationConfigUpdatedAt)
+    // string "BucketTargetsConfigUpdatedAt"
+    o = append(o, 0xbc, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+    o = msgp.AppendTime(o, z.BucketTargetsConfigUpdatedAt)
+    // string "BucketTargetsConfigMetaUpdatedAt"
+    o = append(o, 0xd9, 0x20, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+    o = msgp.AppendTime(o, z.BucketTargetsConfigMetaUpdatedAt)
     return
 }

@@ -616,6 +673,24 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
                 err = msgp.WrapError(err, "LifecycleConfigUpdatedAt")
                 return
             }
+        case "NotificationConfigUpdatedAt":
+            z.NotificationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+            if err != nil {
+                err = msgp.WrapError(err, "NotificationConfigUpdatedAt")
+                return
+            }
+        case "BucketTargetsConfigUpdatedAt":
+            z.BucketTargetsConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt")
+                return
+            }
+        case "BucketTargetsConfigMetaUpdatedAt":
+            z.BucketTargetsConfigMetaUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+            if err != nil {
+                err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt")
+                return
+            }
         default:
             bts, err = msgp.Skip(bts)
             if err != nil {

@@ -630,6 +705,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
 
 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *BucketMetadata) Msgsize() (s int) {
-    s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize + 25 + msgp.TimeSize
+    s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize + 25 + msgp.TimeSize + 28 + msgp.TimeSize + 29 + msgp.TimeSize + 34 + msgp.TimeSize
     return
 }

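Note: the regenerated serializers change only two things: three new time fields, and the map header count going from 22 to 25. In MessagePack, 0xde opens a map16 whose next two bytes are the big-endian entry count, so 0xde 0x00 0x16 encodes 22 fields and 0xde 0x00 0x19 encodes 25. Key names under 32 bytes use a fixstr prefix (0xa0 | length, hence 0xa4 before "Name" and 0xbb/0xbc before the 27- and 28-byte keys), while the 32-byte "BucketTargetsConfigMetaUpdatedAt" needs the str8 form 0xd9 0x20. A quick check of those constants:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // map16 header: 0xde followed by a big-endian uint16 entry count.
        fmt.Println(binary.BigEndian.Uint16([]byte{0x00, 0x19})) // 25

        // fixstr stores lengths 0-31 in the low five bits of the prefix.
        fmt.Println(0xa4&0x1f, len("Name")) // 4 4

        // str8 (0xd9) carries a one-byte length; 0x20 == 32.
        fmt.Println(0x20, len("BucketTargetsConfigMetaUpdatedAt")) // 32 32
    }
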
@@ -26,7 +26,7 @@ import (
     "github.com/minio/minio/internal/event"
     "github.com/minio/minio/internal/logger"
     "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
 )
 
 const (

@@ -66,8 +66,9 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
-    config.SetRegion(globalSite.Region)
-    if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
+    region := globalSite.Region()
+    config.SetRegion(region)
+    if err = config.Validate(region, globalEventNotifier.targetList); err != nil {
         arnErr, ok := err.(*event.ErrARNNotFound)
         if ok {
             for i, queue := range config.QueueList {

@@ -134,7 +135,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
         return
     }
 
-    config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList)
+    config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region(), globalEventNotifier.targetList)
     if err != nil {
         apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
         if event.IsEventError(err) {

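Note: globalSite.Region is now an accessor call rather than a field read, presumably so the site config can be reloaded behind a lock; the GET handler also caches the value in a local so SetRegion and Validate observe the same region even if the config changes mid-request. A sketch of the accessor shape under that assumption (illustrative, not the server's actual type):

    package sketch

    import "sync"

    type siteConfig struct {
        mu     sync.RWMutex
        region string
    }

    // Region takes a read lock so a concurrent config reload can never
    // be observed mid-update by request handlers.
    func (s *siteConfig) Region() string {
        s.mu.RLock()
        defer s.mu.RUnlock()
        return s.region
    }
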
@@ -28,7 +28,7 @@ import (
     "github.com/minio/minio/internal/bucket/replication"
     xhttp "github.com/minio/minio/internal/http"
     "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
 )
 
 // BucketObjectLockSys - map of bucket and retention configuration.

@@ -27,7 +27,7 @@ import (
     "github.com/minio/madmin-go/v3"
     "github.com/minio/minio/internal/logger"
     "github.com/minio/mux"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
 )
 
 const (

@@ -29,8 +29,8 @@ import (
     "testing"
 
     "github.com/minio/minio/internal/auth"
-    "github.com/minio/pkg/v2/policy"
-    "github.com/minio/pkg/v2/policy/condition"
+    "github.com/minio/pkg/v3/policy"
+    "github.com/minio/pkg/v3/policy/condition"
 )
 
 func getAnonReadOnlyBucketPolicy(bucketName string) *policy.BucketPolicy {

@@ -107,7 +107,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.BucketPolic
 
 // Wrapper for calling Create Bucket and ensure we get one and only one success.
 func TestCreateBucket(t *testing.T) {
-    ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucket"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testCreateBucket, endpoints: []string{"MakeBucket"}})
 }
 
 // testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.

@@ -154,7 +154,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
 
 // Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestPutBucketPolicyHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testPutBucketPolicyHandler, endpoints: []string{"PutBucketPolicy"}})
 }
 
 // testPutBucketPolicyHandler - Test for Bucket policy end point.

@@ -373,7 +373,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 
 // Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestGetBucketPolicyHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "GetBucketPolicy"}})
 }
 
 // testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.

@@ -577,7 +577,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 
 // Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestDeleteBucketPolicyHandler(t *testing.T) {
-    ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"})
+    ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testDeleteBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "DeleteBucketPolicy"}})
 }
 
 // testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.

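Note: all four test wrappers migrate from positional arguments to an ExecObjectLayerAPITestArgs struct. The payoff of this pattern is that new options become zero-valued fields instead of signature changes rippling through every call site. A generic sketch of the idea; the names below are illustrative, not the harness's actual definition:

    package sketch

    import "testing"

    type execTestArgs struct {
        t          *testing.T
        objAPITest func(t *testing.T)
        endpoints  []string // zero value can mean "all endpoints"
    }

    // execTest reads its options from the struct; adding a field later
    // does not break existing callers.
    func execTest(args execTestArgs) {
        args.t.Helper()
        for _, ep := range args.endpoints {
            args.t.Logf("registering endpoint %s", ep)
        }
        args.objAPITest(args.t)
    }
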
@@ -32,7 +32,7 @@ import (
     "github.com/minio/minio/internal/handlers"
     xhttp "github.com/minio/minio/internal/http"
     "github.com/minio/minio/internal/logger"
-    "github.com/minio/pkg/v2/policy"
+    "github.com/minio/pkg/v3/policy"
 )
 
 // PolicySys - policy subsystem.

@@ -49,8 +49,11 @@ var bucketStorageCache = cachevalue.New[DataUsageInfo]()
 func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
     bucketStorageCache.InitOnce(10*time.Second,
         cachevalue.Opts{ReturnLastGood: true, NoWait: true},
-        func() (DataUsageInfo, error) {
-            ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
+        func(ctx context.Context) (DataUsageInfo, error) {
+            if objAPI == nil {
+                return DataUsageInfo{}, errServerNotInitialized
+            }
+            ctx, done := context.WithTimeout(ctx, 2*time.Second)
             defer done()
 
             return loadDataUsageFromBackend(ctx, objAPI)
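
Note: the cache loader now receives the caller's context instead of minting one from context.Background(), so request deadlines propagate into loadDataUsageFromBackend, and a nil object layer fails fast with errServerNotInitialized. The internal cachevalue package is not shown in this diff; a generic sketch of a single-value TTL cache with a context-aware loader conveys the shape (not the actual cachevalue API):

    package sketch

    import (
        "context"
        "sync"
        "time"
    )

    type cached[T any] struct {
        mu      sync.Mutex
        val     T
        fetched time.Time
        ttl     time.Duration
        load    func(ctx context.Context) (T, error)
    }

    // Get returns the cached value while fresh; otherwise it reloads
    // under the caller's context and falls back to the last good value
    // on error, similar in spirit to ReturnLastGood above.
    func (c *cached[T]) Get(ctx context.Context) (T, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if !c.fetched.IsZero() && time.Since(c.fetched) < c.ttl {
            return c.val, nil
        }
        v, err := c.load(ctx)
        if err != nil {
            return c.val, err
        }
        c.val, c.fetched = v, time.Now()
        return v, nil
    }
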
@@ -59,8 +62,10 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
 }
 
 // GetBucketUsageInfo return bucket usage info for a given bucket
-func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
-    dui, err := bucketStorageCache.Get()
+func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) BucketUsageInfo {
+    sys.Init(newObjectLayerFn())
+
+    dui, err := bucketStorageCache.GetWithCtx(ctx)
     timedout := OperationTimedOut{}
     if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
         if len(dui.BucketsUsage) > 0 {

@@ -73,10 +78,10 @@ func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, e
     if len(dui.BucketsUsage) > 0 {
         bui, ok := dui.BucketsUsage[bucket]
         if ok {
-            return bui, nil
+            return bui
         }
     }
-    return BucketUsageInfo{}, nil
+    return BucketUsageInfo{}
 }
 
 // parseBucketQuota parses BucketQuota from json

@@ -118,11 +123,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string,
         return BucketQuotaExceeded{Bucket: bucket}
     }
 
-    bui, err := sys.GetBucketUsageInfo(bucket)
-    if err != nil {
-        return err
-    }
-
+    bui := sys.GetBucketUsageInfo(ctx, bucket)
     if bui.Size > 0 && ((bui.Size + uint64(size)) >= quotaSize) {
         return BucketQuotaExceeded{Bucket: bucket}
     }

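Note: with GetBucketUsageInfo now infallible (it returns the last cached usage, or a zero BucketUsageInfo on a cold cache), hard-quota enforcement reduces to a single comparison. The bui.Size > 0 guard means an unknown usage admits the write rather than rejecting it. Stated as plain arithmetic:

    package sketch

    // exceedsHardQuota rejects a write when known usage plus the incoming
    // object size would reach the configured limit; zero known usage
    // (cache miss) admits the write, matching the guard in enforceQuotaHard.
    func exceedsHardQuota(used, incoming, quota uint64) bool {
        return used > 0 && used+incoming >= quota
    }
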
Some files were not shown because too many files have changed in this diff.