Compare commits
282 Commits
RELEASE.20 ... RELEASE.20

Commits (SHA1):
73dde66dbe e30c0e7ca3 8fc200c0cc 708296ae1b fbb5e75e01 f327b21557 45b7253f39 05bb655efc 8fdfcfb562 e7c144eeac
e98172d72d f2d063e7b9 a50f26b7f5 69294cf98a c397fb6c7a 961b0b524e 860fc200b0 109a9e3f35 5f971fea6e 0d7abe3b9f
94fbcd8ebe 879d5dd236 34187e047d 0ee722f8c3 b7d11141e1 e9babf3dac 0bb81f2e9c bea0b050cd ce62980d4e dc88865908
9fbd931058 b0264bdb90 95d6f43cc8 9cb94eb4a9 bd0819330d 8d9e83fd99 be02333529 506f121576 ca488cce87 11dc723324
dd6ea18901 3369eeb920 9032f49f25 fbc6f3f6e8 fba883839d a93214ea63 e6b0fc465b 70fbcfee4a 0b074d0fae d67e4d5b17
891c60d83d fe3e49c4eb 58306a9d34 41091d9472 a4cfb5e1ed 51aa59a737 8bedb419a9 f56a182b71 317b40ef90 e938ece492
02331a612c 8317557f70 1bb7a2a295 215ca58d6a 12f570a307 e4b619ce1a 0a286153bb 22d59e757d 0daa2dbf59 96c2304ae8
343dd2f491 38f35463b7 5573986e8e f3367a1b20 a3c2f7b0e8 8fbec30998 a7466eeb0e 8b1e819bf3 fe63664164 4598827dcb
9afdb05bf4 9569a85cee 54721b7c7b 91d8bddbd1 80adc87a14 117ad1b65b 2229509362 6ef8e87492 0a25083fdb 15137d0327
8c9974bc0f 079b6c2b50 0924b34a17 754f7a8a39 64bafe1dfe c3e456e7e6 57aaeafd2f 3c2e1a87e2 da95a2d13f cc5e05fdeb
a79c390cca 8a56af439c f6e581ce54 8953f88780 4b4a98d5e5 7472818d94 ad44fe8d3e 55e713db0a 33322e6638 a1792ca0d1
ac8c43fe9c 4d40ee00e9 06f59ad631 877e0cac03 ef67c39910 508710f4d1 3aa3d9cf14 c2fedb4c3f 03dc65e12d b8d62a8068
dbc2368a7b 54aed421b8 d5e8dac1cf 96ec8fcba1 0663eb69ed 0594d37230 3cc30bcc18 99c1a642a4 c60f54e5be 483389f2e2
c0f2f84285 069d118329 a7b1834772 6415dec37a 74253e1ddc 01b3fb91e5 2dc917e87f 0a284a1a10 5c8339e1e8 fd37418da2
bbfea29c2b aa703dc903 8cd80fec8c 780882efcf c5636143c6 ba6218b354 8e32de3ba9 e37508fb8f b46a717425 7926df0b80
557df666fd f91b257f50 28a2d1eb3d a0ae1489e5 edfb310a59 a2312028b9 78f1f69d57 e1e33077e8 b3e7de010d f5b04865f4
bf1c6edb76 2ac7fee017 128256e3ab a66a7f3e97 20b79f8945 9a877734b2 409c391850 763ff085a6 5b9656374c b32014549c
9476d212bc 000928d34e 6829ae5b13 f09756443d 5512016885 21ecb941fe 77e94087cf 9ab1f25a47 aaab7aefbe 5b8599e52d
74e0c9ab9b dcce83b288 f731e7ea36 ec30bb89a4 7cd08594f6 2b4531f069 11544a62aa 18550387d5 efb03e19e6 c27d0583d4
0de2b9a1b2 9dc29d7687 72871dbb9a 4bda4e4e2b 1971c54a50 bb77b89da0 b336e9a79f a2ab21e91c 603437e70f db3a9a5990
24c7e73b4e c053e57068 6d20ec3bea c50627ee3e b3cd893f93 22d2dbc4e6 2b5d9428b1 d6446cb096 c34bdc33fb dd8547e51c
aec023f537 e101eeeda9 f29522269d 3c470a6b8b 6bc7d711b3 10d5dd3a67 d9f1df01eb cdeab19673 22ee678136 50a8f13e85
6dec60b6e6 ac3a19138a 21e8e071d7 57f84a8b4c 8a672e70a7 22041bbcc4 5afb459113 91ebac0a00 5fcb1cfd31 3a90fb108c
4eeb48f8e0 373d48c8a3 1472875670 74cfb207c1 6a096e7dc7 9788d85ea3 69c0e18685 3cac927348 9081346c40 fcfadb0e51
2add57cfed c5279ec630 b73699fad8 b8ebe54e53 c3d70e0795 8c4561b8da fd421ddd6f 9947c01c8e a00db4267c 36385010f5
fa6d082bfd 9fab91852a b733e6e83c ce05bb69dc 37aa5934a1 1647fc7edc 7b92687397 419e5baf16 dc48cd841a b0e1776d6d
7a7068ee47 cbc0ef459b a2aabfabd9 822cbd4b43 3c19a9308d 32890342ce ed2c2a285f 18e23bafd9 8b8be2695f 96fbf18201
c8a57a8fa2 b1c2dacab3 65939913b4 08b3a466e8 5aa7c38035 1df5e31706 9f7044aed0 41de53996b 9878031cfd e3fbcaeb72
ca6dd8be5e fba0924b1d
@@ -1,10 +1,17 @@
.git
.github
docs
default.etcd
*.gz
*.tar.gz
*.bzip2
*.zip
browser/node_modules
node_modules
node_modules
docs/debugging/s3-verify/s3-verify
docs/debugging/xl-meta/xl-meta
docs/debugging/s3-check-md5/s3-check-md5
docs/debugging/hash-set/hash-set
docs/debugging/healing-bin/healing-bin
docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks
2 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -1,6 +1,6 @@
## Community Contribution License
All community contributions in this pull request are licensed to the project maintainers
under the terms of the [Apache 2 license] (https://www.apache.org/licenses/LICENSE-2.0).
under the terms of the [Apache 2 license](https://www.apache.org/licenses/LICENSE-2.0).
By creating this pull request I represent that I have the right to license the
contributions to the project maintainers under the Apache 2 license.
1 .github/workflows/go-cross.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/go-fips.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/go-healing.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/go-lint.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/go.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/helm-lint.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/iam-integrations.yaml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
16 .github/workflows/mint.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -37,7 +38,11 @@ jobs:

- name: build-minio
run: |
TAG="minio/minio:${{ steps.vars.outputs.sha_short }}" make docker
TAG="quay.io/minio/minio:${{ steps.vars.outputs.sha_short }}" make docker

- name: multipart uploads test
run: |
${GITHUB_WORKSPACE}/.github/workflows/multipart/migrate.sh "${{ steps.vars.outputs.sha_short }}"

- name: compress and encrypt
run: |
@@ -59,7 +64,14 @@
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/mint/minio-${mode}.yaml down || true
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/mint/minio-${mode}.yaml rm || true
done
docker rmi -f minio/minio:${{ steps.vars.outputs.sha_short }}

docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/multipart/docker-compose-site1.yaml rm -s -f || true
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/multipart/docker-compose-site2.yaml rm -s -f || true
for volume in $(docker volume ls -q | grep minio); do
docker volume rm ${volume} || true
done

docker rmi -f quay.io/minio/minio:${{ steps.vars.outputs.sha_short }}
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
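The updated mint workflow builds the server image under the quay.io/minio namespace and adds a multipart-uploads upgrade test that drives the new .github/workflows/multipart files shown below. A minimal local sketch of the same flow, assuming Docker, docker-compose and GNU make are available and the repository root is the working directory; the tag value is illustrative:

```bash
#!/bin/bash
# Sketch of the workflow's build-minio and multipart-uploads-test steps.
set -e

# Build the image the same way the workflow does, tagged under quay.io/minio.
TAG="quay.io/minio/minio:local-test" make docker

# Run the multipart migration test; the single argument becomes ${RELEASE}
# for the second phase of the docker-compose-site*.yaml files.
./.github/workflows/multipart/migrate.sh "local-test"
```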
@@ -2,7 +2,7 @@ version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio:${JOB_NAME}
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/cdata{1...2}
expose:
- "9000"
@@ -16,10 +16,10 @@ x-minio-common: &minio-common
MINIO_COMPRESSION_ALLOW_ENCRYPTION: "on"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
10 .github/workflows/mint/minio-erasure.yaml vendored
@@ -2,7 +2,7 @@ version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio:${JOB_NAME}
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" edata{1...4}
expose:
- "9000"
@@ -13,10 +13,10 @@ x-minio-common: &minio-common
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
10 .github/workflows/mint/minio-pools.yaml vendored
@@ -2,7 +2,7 @@ version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio:${JOB_NAME}
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/pdata{1...2} http://minio{5...8}/pdata{1...2}
expose:
- "9000"
@@ -13,10 +13,10 @@ x-minio-common: &minio-common
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
66 .github/workflows/multipart/docker-compose-site1.yaml vendored Normal file
@@ -0,0 +1,66 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${RELEASE}
command: server http://site1-minio{1...4}/data{1...2}
environment:
- MINIO_PROMETHEUS_AUTH_TYPE=public
- CI=true

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
site1-minio1:
<<: *minio-common
hostname: site1-minio1
volumes:
- site1-data1-1:/data1
- site1-data1-2:/data2

site1-minio2:
<<: *minio-common
hostname: site1-minio2
volumes:
- site1-data2-1:/data1
- site1-data2-2:/data2

site1-minio3:
<<: *minio-common
hostname: site1-minio3
volumes:
- site1-data3-1:/data1
- site1-data3-2:/data2

site1-minio4:
<<: *minio-common
hostname: site1-minio4
volumes:
- site1-data4-1:/data1
- site1-data4-2:/data2

site1-nginx:
image: nginx:1.19.2-alpine
hostname: site1-nginx
volumes:
- ./nginx-site1.conf:/etc/nginx/nginx.conf:ro
ports:
- "9001:9001"
depends_on:
- site1-minio1
- site1-minio2
- site1-minio3
- site1-minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
site1-data1-1:
site1-data1-2:
site1-data2-1:
site1-data2-2:
site1-data3-1:
site1-data3-2:
site1-data4-1:
site1-data4-2:
66 .github/workflows/multipart/docker-compose-site2.yaml vendored Normal file
@@ -0,0 +1,66 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${RELEASE}
command: server http://site2-minio{1...4}/data{1...2}
environment:
- MINIO_PROMETHEUS_AUTH_TYPE=public
- CI=true

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
site2-minio1:
<<: *minio-common
hostname: site2-minio1
volumes:
- site2-data1-1:/data1
- site2-data1-2:/data2

site2-minio2:
<<: *minio-common
hostname: site2-minio2
volumes:
- site2-data2-1:/data1
- site2-data2-2:/data2

site2-minio3:
<<: *minio-common
hostname: site2-minio3
volumes:
- site2-data3-1:/data1
- site2-data3-2:/data2

site2-minio4:
<<: *minio-common
hostname: site2-minio4
volumes:
- site2-data4-1:/data1
- site2-data4-2:/data2

site2-nginx:
image: nginx:1.19.2-alpine
hostname: site2-nginx
volumes:
- ./nginx-site2.conf:/etc/nginx/nginx.conf:ro
ports:
- "9002:9002"
depends_on:
- site2-minio1
- site2-minio2
- site2-minio3
- site2-minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
site2-data1-1:
site2-data1-2:
site2-data2-1:
site2-data2-2:
site2-data3-1:
site2-data3-2:
site2-data4-1:
site2-data4-2:
115 .github/workflows/multipart/migrate.sh vendored Executable file
@@ -0,0 +1,115 @@
#!/bin/bash

set -x

## change working directory
cd .github/workflows/multipart/

function cleanup() {
docker-compose -f docker-compose-site1.yaml rm -s -f || true
docker-compose -f docker-compose-site2.yaml rm -s -f || true
for volume in $(docker volume ls -q | grep minio); do
docker volume rm ${volume} || true
done

docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
}

cleanup

if [ ! -f ./mc ]; then
wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi

(
cd /tmp
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
)

export RELEASE=RELEASE.2023-08-29T23-07-35Z

docker-compose -f docker-compose-site1.yaml up -d
docker-compose -f docker-compose-site2.yaml up -d

sleep 30s

./mc alias set site1 http://site1-nginx:9001 minioadmin minioadmin --api s3v4
./mc alias set site2 http://site2-nginx:9002 minioadmin minioadmin --api s3v4

./mc ready site1/
./mc ready site2/

./mc admin replicate add site1 site2
./mc mb site1/testbucket/
./mc cp -r --quiet /usr/bin site1/testbucket/

sleep 5

s3-check-md5 -h

failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
exit 1
fi

if [ $failed_count_site2 -ne 0 ]; then
echo "failed with multipart on site2 uploads"
exit 1
fi

./mc cp -r --quiet /usr/bin site1/testbucket/

sleep 5

failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

## we do not need to fail here, since we are going to test
## upgrading to master, healing and being able to recover
## the last version.
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads ${failed_count_site1}"
fi

if [ $failed_count_site2 -ne 0 ]; then
echo "failed with multipart on site2 uploads ${failed_count_site2}"
fi

export RELEASE=${1}

docker-compose -f docker-compose-site1.yaml up -d
docker-compose -f docker-compose-site2.yaml up -d

./mc ready site1/
./mc ready site2/

for i in $(seq 1 10); do
# mc admin heal -r --remove when used against a LB endpoint
# behaves flaky, let this run 10 times before giving up
./mc admin heal -r --remove --json site1/ 2>&1 >/dev/null
./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
done

failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
exit 1
fi

if [ $failed_count_site2 -ne 0 ]; then
echo "failed with multipart on site2 uploads"
exit 1
fi

cleanup

## change working directory
cd ../../../
61 .github/workflows/multipart/nginx-site1.conf vendored Normal file
@@ -0,0 +1,61 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
worker_connections 4096;
}

http {
include /etc/nginx/mime.types;
default_type application/octet-stream;

log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';

access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;

# include /etc/nginx/conf.d/*.conf;

upstream minio {
server site1-minio1:9000;
server site1-minio2:9000;
server site1-minio3:9000;
server site1-minio4:9000;
}

server {
listen 9001;
listen [::]:9001;
server_name localhost;

# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;

location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;

proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;

proxy_pass http://minio;
}
}
}
61 .github/workflows/multipart/nginx-site2.conf vendored Normal file
@@ -0,0 +1,61 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
worker_connections 4096;
}

http {
include /etc/nginx/mime.types;
default_type application/octet-stream;

log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';

access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;

# include /etc/nginx/conf.d/*.conf;

upstream minio {
server site2-minio1:9000;
server site2-minio2:9000;
server site2-minio3:9000;
server site2-minio4:9000;
}

server {
listen 9002;
listen [::]:9002;
server_name localhost;

# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;

location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;

proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;

proxy_pass http://minio;
}
}
}
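Each site is reachable only through its nginx reverse proxy (host ports 9001 and 9002, per the compose files above). A quick way to confirm the proxy path once a site is up is to hit MinIO's liveness endpoint through it; a sketch, assuming the site1 compose stack is running on the local host and the release value is illustrative:

```bash
# Bring up site1 with a pinned server release (value is illustrative).
export RELEASE=RELEASE.2023-08-29T23-07-35Z
docker-compose -f .github/workflows/multipart/docker-compose-site1.yaml up -d

# Probe MinIO's liveness endpoint through the site1 nginx proxy on the host.
curl -f http://localhost:9001/minio/health/live
```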
1 .github/workflows/replication.yaml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
1 .github/workflows/root-disable.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
2 .github/workflows/run-mint.sh vendored
@@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
cd .github/workflows/mint

docker-compose -f minio-${MODE}.yaml up -d
sleep 5m
sleep 30s

docker system prune -f || true
docker volume prune -f || true
1 .github/workflows/shfmt.yml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

permissions:
contents: read
1 .github/workflows/upgrade-ci-cd.yaml vendored
@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
3 .github/workflows/vulncheck.yml vendored
@@ -3,6 +3,7 @@ on:
pull_request:
branches:
- master

push:
branches:
- master
@@ -20,7 +21,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.21.0
go-version: 1.21.5
check-latest: true
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
6 .gitignore vendored
@@ -32,6 +32,8 @@ minio.RELEASE*
mc
nancy
inspects/*
.bin/
*.gz
docs/debugging/s3-verify/s3-verify
docs/debugging/xl-meta/xl-meta
docs/debugging/s3-check-md5/s3-check-md5
@@ -39,5 +41,5 @@ docs/debugging/hash-set/hash-set
docs/debugging/healing-bin/healing-bin
docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
.bin/
*.gz
docs/debugging/reorder-disks/reorder-disks
docs/debugging/populate-hard-links/populate-hardlinks
@@ -13,7 +13,6 @@ linters:
enable:
- durationcheck
- gocritic
- gofmt
- gofumpt
- goimports
- gomodguard
@@ -1,8 +1,6 @@
FROM minio/minio:latest

ENV PATH=/opt/bin:$PATH

COPY ./minio /opt/bin/minio
COPY ./minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
@@ -1,4 +1,31 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8
FROM golang:1.21-alpine as build

ARG TARGETARCH
ARG RELEASE

ENV GOPATH /go
ENV CGO_ENABLED 0

# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0

# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio

# Download mc binary and signature file
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

FROM registry.access.redhat.com/ubi9/ubi-micro:latest

ARG RELEASE

@@ -17,33 +44,18 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
PATH=/opt/bin:$PATH
MC_CONFIG_DIR=/tmp/.mc

COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
microdnf clean all && \
microdnf update --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install curl ca-certificates shadow-utils util-linux gzip lsof tar net-tools iproute iputils jq minisign --nodocs && \
mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE} -o /opt/bin/minio && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
microdnf clean all && \
chmod +x /opt/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh && \
microdnf clean all
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]
@@ -1,6 +1,31 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8
FROM golang:1.21-alpine as build

ARG TARGETARCH
ARG RELEASE

ENV GOPATH /go
ENV CGO_ENABLED 0

# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0

# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio

# Download mc binary and signature file
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

FROM registry.access.redhat.com/ubi9/ubi-micro:latest

ARG RELEASE

@@ -19,35 +44,18 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
PATH=/opt/bin:$PATH
MC_CONFIG_DIR=/tmp/.mc

COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
microdnf clean all && \
microdnf update --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install curl ca-certificates shadow-utils util-linux gzip lsof tar net-tools iproute iputils jq minisign --nodocs && \
mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /opt/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /opt/bin/mc && \
microdnf clean all && \
chmod +x /opt/bin/minio && \
chmod +x /opt/bin/mc && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh && \
microdnf clean all
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]
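These release-image hunks replace the ubi-minimal base with a two-stage build: a golang:1.21-alpine stage downloads the server (and, where applicable, mc) binaries for ${RELEASE}, verifies them with minisign, and a ubi-micro runtime stage copies them in. A hedged sketch of building one of these images locally; the file name, platform, and tag are illustrative (the file names are not shown in this view), and TARGETARCH is filled in automatically when BuildKit/buildx is used:

```bash
# Sketch only: build a release-style image for a pinned server release.
# RELEASE value is taken from the migrate.sh example above and is illustrative.
docker buildx build \
  --platform linux/amd64 \
  --build-arg RELEASE=RELEASE.2023-08-29T23-07-35Z \
  -f Dockerfile.release \
  -t minio-release:local-test \
  --load .
```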
@@ -1,6 +1,25 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8
FROM golang:1.21-alpine as build

ARG TARGETARCH
ARG RELEASE

ENV GOPATH /go
ENV CGO_ENABLED 0

# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0

# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

FROM registry.access.redhat.com/ubi9/ubi-micro:latest

ARG RELEASE

@@ -18,34 +37,17 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
PATH=/opt/bin:$PATH
MINIO_CONFIG_ENV_FILE=config.env

COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
microdnf clean all && \
microdnf update --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install curl ca-certificates shadow-utils util-linux gzip lsof tar net-tools iproute iputils jq minisign --nodocs && \
mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /opt/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.sha256sum -o /opt/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /opt/bin/minio.minisig && \
microdnf clean all && \
chmod +x /opt/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh && \
microdnf clean all
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]
61 Dockerfile.release.old_cpu Normal file
@@ -0,0 +1,61 @@
FROM golang:1.21-alpine as build

ARG TARGETARCH
ARG RELEASE

ENV GOPATH /go
ENV CGO_ENABLED 0

# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0

# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio

# Download mc binary and signature file
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

FROM registry.access.redhat.com/ubi8/ubi-micro:latest

ARG RELEASE

LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
MC_CONFIG_DIR=/tmp/.mc

COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc

COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

EXPOSE 9000
VOLUME ["/data"]

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]
53 Makefile
@@ -6,7 +6,7 @@ GOARCH := $(shell go env GOARCH)
GOOS := $(shell go env GOOS)

VERSION ?= $(shell git describe --tags)
TAG ?= "minio/minio:$(VERSION)"
TAG ?= "quay.io/minio/minio:$(VERSION)"

GOLANGCI_VERSION = v1.51.2
GOLANGCI_DIR = .bin/golangci/$(GOLANGCI_VERSION)
@@ -24,7 +24,7 @@ help: ## print this help
getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR) $(GOLANGCI_VERSION)
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@6ac204f0b4d48d17ab4fa442134c7fba13127a4e
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

crosscompile: ## cross compile minio
@@ -45,22 +45,22 @@ lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix

check: test
test: verifiers build ## builds minio, runs linters, tests
test: verifiers build build-debugging ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue ./...

test-root-disable: install
test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh

test-decom: install
test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh

test-upgrade: build
test-upgrade: install-race
@echo "Running minio upgrade tests"
@(env bash $(PWD)/buildscripts/minio-upgrade.sh)

@@ -86,18 +86,18 @@ test-replication-3site:
test-delete-replication:
@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)

test-replication: install test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
@echo "Running tests for replicating three sites"

test-site-replication-ldap: install ## verify automatic site replication
test-site-replication-ldap: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with LDAP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-ldap.sh)

test-site-replication-oidc: install ## verify automatic site replication
test-site-replication-oidc: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with OIDC)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-oidc.sh)

test-site-replication-minio: install ## verify automatic site replication
test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)

@@ -128,6 +128,9 @@ verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)

build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)

build: checks ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@@ -138,14 +141,23 @@ hotfix-vars:
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
$(eval TAG := "minio/minio:$(VERSION)")

hotfix: hotfix-vars install ## builds minio binary with hotfix tags
@mv -f ./minio ./minio.$(VERSION)
@minisign -qQSm ./minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < ./minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio.$(VERSION).sha256sum
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.0/pkger_2.2.0_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.0/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.2.0_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@minisign -qQSm minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION).sha256sum
@cp -af minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)* minio-release/$(GOOS)-$(GOARCH)/archive/
@pkger -r $(VERSION) --ignore

hotfix-push: hotfix
@scp -q -r minio.$(VERSION)* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
@scp -q -r minio.$(VERSION)* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"

docker-hotfix-push: docker-hotfix
@@ -159,6 +171,12 @@ docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile

install-race: checks ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio

install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@@ -174,3 +192,6 @@ clean: ## cleanup all generated assets
@rm -rvf build
@rm -rvf release
@rm -rvf .verify*
@rm -rvf minio-release
@rm -rvf minio.RELEASE*.hotfix.*
@rm -rvf pkger_*.deb
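Several of the test targets above now depend on the new install-race target instead of install, so the scripted suites exercise a race-detector build (note that -race requires CGO). A short usage sketch:

```bash
# Build ./minio with the race detector and copy it to $GOPATH/bin (new target).
make install-race

# The scripted suites now consume that race-enabled binary, for example:
make test-root-disable
make test-decom
```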
@@ -56,6 +56,8 @@ done

set +e

sleep 10

./mc ls minioadm/
if [ $? -ne 0 ]; then
echo "listing failed, 'minioadmin' should be enabled"
@@ -87,8 +87,8 @@ function verify_rewrite() {
exit 1
fi

go build ./docs/debugging/s3-check-md5/
if ! ./s3-check-md5 \
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
if ! s3-check-md5 \
-debug \
-versions \
-access-key minio \
@@ -113,7 +113,7 @@ function verify_rewrite() {
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1

if ! ./s3-check-md5 \
if ! s3-check-md5 \
-debug \
-versions \
-access-key minio \
@@ -82,8 +82,8 @@ function start_minio_16drive() {
rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
sleep 10

go build ./docs/debugging/s3-check-md5/
if ! ./s3-check-md5 \
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
@@ -111,7 +111,7 @@ function start_minio_16drive() {
exit 1
fi

if ! ./s3-check-md5 \
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
@@ -134,7 +134,7 @@ function start_minio_16drive() {

"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket

if ! ./s3-check-md5 \
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
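Both build scripts above drop the locally built ./s3-check-md5 in favor of installing the tool with `go install` and invoking it by name, so $GOPATH/bin (or $HOME/go/bin) is assumed to be on PATH. A sketch of the new pattern; the endpoint and bucket values are illustrative:

```bash
# Install the checker once into $(go env GOPATH)/bin.
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest

# Call it by name with flags like those used in the scripts above
# (endpoint and bucket are illustrative values).
s3-check-md5 -debug -versions \
  -access-key minio -secret-key minio123 \
  -endpoint http://127.0.0.1:9000 -bucket healing-shard-bucket
```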
@@ -9,11 +9,6 @@ x-minio-common: &minio-common
expose:
- "9000"
- "9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
@@ -392,7 +392,8 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
// of bucket metadata
|
||||
zipWriter := zip.NewWriter(w)
|
||||
defer zipWriter.Close()
|
||||
rawDataFn := func(r io.Reader, filename string, sz int) error {
|
||||
|
||||
rawDataFn := func(r io.Reader, filename string, sz int) {
|
||||
header, zerr := zip.FileInfoHeader(dummyFileInfo{
|
||||
name: filename,
|
||||
size: int64(sz),
|
||||
@@ -401,20 +402,13 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
isDir: false,
|
||||
sys: nil,
|
||||
})
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
return nil
|
||||
if zerr == nil {
|
||||
header.Method = zip.Deflate
|
||||
zwriter, zerr := zipWriter.CreateHeader(header)
|
||||
if zerr == nil {
|
||||
io.Copy(zwriter, r)
|
||||
}
|
||||
}
|
||||
header.Method = zip.Deflate
|
||||
zwriter, zerr := zipWriter.CreateHeader(header)
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
if _, err := io.Copy(zwriter, r); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
cfgFiles := []string{
@@ -446,10 +440,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketLifecycleConfig:
config, _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err != nil {
@@ -465,10 +456,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketQuotaConfigFile:
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
if err != nil {
@@ -483,10 +471,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketSSEConfig:
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
@@ -501,10 +486,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketTaggingConfig:
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
@@ -519,10 +501,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case objectLockConfig:
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
@@ -538,10 +517,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketVersioningConfig:
config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
if err != nil {
@@ -557,10 +533,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketReplicationConfig:
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
@@ -575,11 +548,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}

if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketTargetsFile:
config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
if err != nil {
@@ -595,10 +564,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
}
}
}
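Note: the hunks above all touch the same export pattern: each bucket config is serialized and handed to a rawDataFn callback that streams it into the export archive. The following is a minimal, self-contained sketch of that callback shape using only the standard library; the names writeFile and the archive paths are illustrative and are not the MinIO implementation.

```go
package main

import (
	"archive/zip"
	"bytes"
	"io"
	"os"
)

// writeFile mirrors the rawDataFn shape seen above: it takes a reader, the
// path inside the archive, and the payload size, and returns an error that
// the caller may either check or ignore.
func writeFile(zw *zip.Writer, r io.Reader, path string, size int) error {
	w, err := zw.Create(path)
	if err != nil {
		return err
	}
	_, err = io.CopyN(w, r, int64(size))
	return err
}

func main() {
	zw := zip.NewWriter(os.Stdout)
	defer zw.Close()

	configData := []byte(`{"quota":1024}`)
	// Checked variant, matching the `if err = rawDataFn(...); err != nil` lines above.
	if err := writeFile(zw, bytes.NewReader(configData), "bucket/quota.json", len(configData)); err != nil {
		// In the handler this is where writeErrorResponse would be issued.
		return
	}
}
```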
@@ -27,14 +27,14 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
)

// validateAdminReq will validate request against and return whether it is allowed.
// If any of the supplied actions are allowed it will be successful.
// If nil ObjectLayer is returned, the operation is not permitted.
// When nil ObjectLayer has been returned an error has always been sent to w.
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...policy.AdminAction) (ObjectLayer, auth.Credentials) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
@@ -78,7 +78,7 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {

var apiErr APIError
switch e := err.(type) {
case iampolicy.Error:
case policy.Error:
apiErr = APIError{
Code: "XMinioMalformedIAMPolicy",
Description: e.Error(),
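Note: most hunks in the remainder of this comparison are mechanical: the named import alias iampolicy for github.com/minio/pkg/v2/policy is dropped, so every iampolicy.X reference becomes policy.X. A small illustrative fragment (not the actual MinIO source file) of what that rename means at a call site:

```go
package cmd

import "github.com/minio/pkg/v2/policy"

// Before the rename, call sites read iampolicy.ConfigUpdateAdminAction via
// the alias import `iampolicy "github.com/minio/pkg/v2/policy"`.
// After the rename, the package's default name is used directly:
var configUpdateAction = policy.ConfigUpdateAdminAction
```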
@@ -28,7 +28,6 @@ import (

"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/config/etcd"
xldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
@@ -38,14 +37,14 @@ import (
"github.com/minio/minio/internal/config/subnet"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
)

// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -149,7 +148,7 @@ type setConfigResult struct {
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -242,7 +241,7 @@ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (re
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -288,7 +287,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -321,7 +320,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -367,7 +366,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -405,7 +404,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -430,7 +429,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -482,7 +481,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -500,8 +499,6 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
switch hkv.Key {
case config.EtcdSubSys:
off = !etcd.Enabled(item.Config)
case config.CacheSubSys:
off = !cache.Enabled(item.Config)
case config.StorageClassSubSys:
off = !storageclass.Enabled(item.Config)
case config.PolicyPluginSubSys:
@@ -34,11 +34,11 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/ldap"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
)

func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -121,7 +121,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R

// IDP config is not dynamic. Sanity check.
if dynamic {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), "", r.URL)
return
}

@@ -220,7 +220,7 @@ func (a adminAPIHandlers) UpdateIdentityProviderCfg(w http.ResponseWriter, r *ht
func (a adminAPIHandlers) ListIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -273,7 +273,7 @@ func (a adminAPIHandlers) ListIdentityProviderCfg(w http.ResponseWriter, r *http
func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -332,7 +332,7 @@ func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.
func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -25,7 +25,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
)

// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
@@ -50,7 +50,7 @@ func (a adminAPIHandlers) ListLDAPPolicyMappingEntities(w http.ResponseWriter, r
// Check authorization.

objectAPI, cred := validateAdminReq(ctx, w, r,
iampolicy.ListGroupsAdminAction, iampolicy.ListUsersAdminAction, iampolicy.ListUserPoliciesAdminAction)
policy.ListGroupsAdminAction, policy.ListUsersAdminAction, policy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
@@ -96,7 +96,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.

// Check authorization.

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.UpdatePolicyAssociationAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.UpdatePolicyAssociationAction)
if objectAPI == nil {
return
}
@@ -26,7 +26,7 @@ import (

"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
)

var (
@@ -37,7 +37,7 @@ var (
func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -113,7 +113,7 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -159,7 +159,7 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -200,7 +200,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -233,7 +233,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
@@ -304,7 +304,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
@@ -344,7 +344,7 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
@@ -31,6 +31,7 @@ import (

"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
@@ -51,7 +52,8 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
return
}

status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
opts := getSRAddOptions(r)
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -67,6 +69,11 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
writeSuccessResponseJSON(w, body)
}

func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return
}

// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
@@ -191,7 +198,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
}
}

// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/peer/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -252,6 +259,8 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
case madmin.SRBucketMetaTypeSSEConfig:
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
case madmin.SRBucketMetaLCConfig:
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
@@ -333,6 +342,7 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
opts.Users = true
opts.Policies = true
opts.Groups = true
opts.ILMExpiryRules = true
}
info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
if err != nil {
@@ -382,7 +392,9 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)

opts := getSREditOptions(r)
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -397,6 +409,12 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
writeSuccessResponseJSON(w, body)
}

func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return
}

// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
@@ -421,12 +439,37 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
}
}

// SRStateEdit - PUT /minio/admin/v3/site-replication/state/edit
//
// used internally to tell current cluster to update site replication state
func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}

var state madmin.SRStateEditReq
if err := parseJSONBody(ctx, r.Body, &state, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
q := r.Form
opts.Buckets = q.Get("buckets") == "true"
opts.Policies = q.Get("policies") == "true"
opts.Groups = q.Get("groups") == "true"
opts.Users = q.Get("users") == "true"
opts.ILMExpiryRules = q.Get("ilm-expiry-rules") == "true"
opts.PeerState = q.Get("peer-state") == "true"
opts.Entity = madmin.GetSREntityType(q.Get("entity"))
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
@@ -537,7 +580,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.

connectTime := time.Now()
for {
n, err := io.CopyN(io.Discard, r.Body, 128*humanize.KiByte)
n, err := io.CopyN(xioutil.Discard, r.Body, 128*humanize.KiByte)
atomic.AddUint64(&globalSiteNetPerfRX.RX, uint64(n))
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec)
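Note: the new getSRAddOptions, getSREditOptions, and getSRStatusOptions helpers above all follow one pattern: boolean request options are read from form values as the literal string "true". A self-contained sketch of that pattern, with an illustrative struct that only mimics the shape of the madmin option types (it is not the madmin API):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// srStatusOptions mirrors the shape of the option structs filled in above;
// the field and query names here are for illustration only.
type srStatusOptions struct {
	Buckets        bool
	ILMExpiryRules bool
	PeerState      bool
	EntityValue    string
}

func parseSRStatusOptions(r *http.Request) (opts srStatusOptions) {
	q := r.Form
	opts.Buckets = q.Get("buckets") == "true"
	opts.ILMExpiryRules = q.Get("ilm-expiry-rules") == "true"
	opts.PeerState = q.Get("peer-state") == "true"
	opts.EntityValue = q.Get("entityvalue")
	return
}

func main() {
	// Normally r.ParseForm populates r.Form; a literal request is enough here.
	r := &http.Request{Form: url.Values{"buckets": {"true"}, "entityvalue": {"mybucket"}}}
	fmt.Printf("%+v\n", parseSRStatusOptions(r))
}
```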
@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -19,6 +19,7 @@ package cmd

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@@ -34,14 +35,14 @@ import (
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
)

// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.DeleteUserAdminAction)
if objectAPI == nil {
return
}
@@ -85,7 +86,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ListUsersAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ListUsersAdminAction)
if objectAPI == nil {
return
}
@@ -119,7 +120,7 @@ func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ListUsersAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ListUsersAdminAction)
if objectAPI == nil {
return
}
@@ -186,10 +187,10 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
checkDenyOnly = true
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.GetUserAdminAction,
Action: policy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -218,7 +219,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.AddUserToGroupAdminAction)
if objectAPI == nil {
return
}
@@ -288,7 +289,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetGroupAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.GetGroupAdminAction)
if objectAPI == nil {
return
}
@@ -315,7 +316,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListGroupsAdminAction)
if objectAPI == nil {
return
}
@@ -339,7 +340,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.EnableGroupAdminAction)
if objectAPI == nil {
return
}
@@ -382,7 +383,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, creds := validateAdminReq(ctx, w, r, iampolicy.EnableUserAdminAction)
objectAPI, creds := validateAdminReq(ctx, w, r, policy.EnableUserAdminAction)
if objectAPI == nil {
return
}
@@ -470,10 +471,10 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
checkDenyOnly = true
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
Action: policy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -544,34 +545,41 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re
return
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
args := policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListTemporaryAccountsAdminAction,
Action: policy.ListTemporaryAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
}

if !globalIAMSys.IsAllowed(args) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

stsAccount, policy, err := globalIAMSys.GetTemporaryAccount(ctx, accessKey)
stsAccount, sessionPolicy, err := globalIAMSys.GetTemporaryAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

var stsAccountPolicy iampolicy.Policy
var stsAccountPolicy policy.Policy

if policy != nil {
stsAccountPolicy = *policy
if sessionPolicy != nil {
stsAccountPolicy = *sessionPolicy
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(stsAccount.ParentUser, false)
policiesNames, err := globalIAMSys.PolicyDBGet(stsAccount.ParentUser, stsAccount.Groups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if len(policiesNames) == 0 {
policySet, _ := args.GetPolicies(iamPolicyClaimNameOpenID())
policiesNames = policySet.ToSlice()
}

stsAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
}

@@ -584,7 +592,7 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re
infoResp := madmin.TemporaryAccountInfoResp{
ParentUser: stsAccount.ParentUser,
AccountStatus: stsAccount.Status,
ImpliedPolicy: policy == nil,
ImpliedPolicy: sessionPolicy == nil,
Policy: string(policyJSON),
Expiration: &stsAccount.Expiration,
}
@@ -709,10 +717,10 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
//
// This allows turning off service accounts for request sender,
// if there is no deny statement this call is implicitly enabled.
if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: requestorUser,
Groups: requestorGroups,
Action: iampolicy.CreateServiceAccountAdminAction,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -743,10 +751,10 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
} else {
// Need permission if we are creating a service account for a
// user <> to the request sender
if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: requestorUser,
Groups: requestorGroups,
Action: iampolicy.CreateServiceAccountAdminAction,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -773,13 +781,16 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// latter, a group notion is not supported.
}

var sp *iampolicy.Policy
var sp *policy.Policy
if len(createReq.Policy) > 0 {
sp, err = iampolicy.ParseConfig(bytes.NewReader(createReq.Policy))
sp, err = policy.ParseConfig(bytes.NewReader(createReq.Policy))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if sp.Version == "" && len(sp.Statements) == 0 {
sp = nil
}
}

opts.sessionPolicy = sp
@@ -864,10 +875,10 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
return
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.UpdateServiceAccountAdminAction,
Action: policy.UpdateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -903,13 +914,16 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
return
}

var sp *iampolicy.Policy
var sp *policy.Policy
if len(updateReq.NewPolicy) > 0 {
sp, err = iampolicy.ParseConfig(bytes.NewReader(updateReq.NewPolicy))
sp, err = policy.ParseConfig(bytes.NewReader(updateReq.NewPolicy))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if sp.Version == "" && len(sp.Statements) == 0 {
sp = nil
}
}
opts := updateServiceAccountOpts{
secretKey: updateReq.NewSecretKey,
@@ -970,16 +984,16 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
return
}

svcAccount, policy, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
svcAccount, sessionPolicy, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListServiceAccountsAdminAction,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -995,12 +1009,15 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
}
}

var svcAccountPolicy iampolicy.Policy
// if session policy is nil or empty, then it is implied policy
impliedPolicy := sessionPolicy == nil || (sessionPolicy.Version == "" && len(sessionPolicy.Statements) == 0)

if policy != nil {
svcAccountPolicy = *policy
var svcAccountPolicy policy.Policy

if !impliedPolicy {
svcAccountPolicy = *sessionPolicy
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, false)
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, svcAccount.Groups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1024,7 +1041,7 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
Name: svcAccount.Name,
Description: svcAccount.Description,
AccountStatus: svcAccount.Status,
ImpliedPolicy: policy == nil,
ImpliedPolicy: impliedPolicy,
Policy: string(policyJSON),
Expiration: expiration,
}
@@ -1067,10 +1084,10 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
// sender), check that the user has permissions.
user := r.Form.Get("user")
if user != "" && user != cred.AccessKey {
if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListServiceAccountsAdminAction,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -1146,12 +1163,16 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re

// We do not care if service account is readable or not at this point,
// since this is a delete call we shall allow it to be deleted if possible.
svcAccount, _, _ := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
if errors.Is(err, errNoSuchServiceAccount) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminServiceAccountNotFound), r.URL)
return
}

adminPrivilege := globalIAMSys.IsAllowed(iampolicy.Args{
adminPrivilege := globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.RemoveServiceAccountAdminAction,
Action: policy.RemoveServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -1192,7 +1213,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
writeSuccessNoContent(w)
}

// AccountInfoHandler returns usage
// AccountInfoHandler returns usage, permissions and other bucket metadata for incoming us
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -1219,10 +1240,10 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
enablePrefixUsage := r.Form.Get("prefix-usage") == "true"

isAllowedAccess := func(bucketName string) (rd, wr bool) {
if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
@@ -1232,10 +1253,10 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
rd = true
}

if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.GetBucketLocationAction,
Action: policy.GetBucketLocationAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
@@ -1245,10 +1266,10 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
rd = true
}

if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
@@ -1261,12 +1282,30 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return rd, wr
}

// Load the latest calculated data usage
dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI)
bucketStorageCache.Once.Do(func() {
// Set this to 10 secs since its enough, as scanner
// does not update the bucket usage values frequently.
bucketStorageCache.TTL = 10 * time.Second

// Rely on older value if usage loading fails from disk.
bucketStorageCache.Relax = true
bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
defer done()

return loadDataUsageFromBackend(ctx, objectAPI)
}
})

var dataUsageInfo DataUsageInfo
v, _ := bucketStorageCache.Get()
if v != nil {
dataUsageInfo, _ = v.(DataUsageInfo)
}
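Note: the hunk above replaces a direct loadDataUsageFromBackend call with a shared, lazily initialized value that is refreshed at most every 10 seconds and falls back to the previous value on failure. The sketch below illustrates that lazy TTL-cache pattern in isolation; the ttlCache type and its fields are stand-ins invented for this example, not MinIO's bucketStorageCache implementation.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache holds one value, refreshed at most once per TTL, and is
// configured exactly once via Once, mirroring the usage shown above.
type ttlCache struct {
	Once   sync.Once
	TTL    time.Duration
	Update func() (interface{}, error)

	mu      sync.Mutex
	value   interface{}
	fetched time.Time
}

func (c *ttlCache) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value != nil && time.Since(c.fetched) < c.TTL {
		return c.value, nil
	}
	v, err := c.Update()
	if err != nil {
		// "Relax"-style behavior: keep serving the stale value on failure.
		return c.value, err
	}
	c.value, c.fetched = v, time.Now()
	return v, nil
}

func main() {
	cache := &ttlCache{}
	cache.Once.Do(func() {
		cache.TTL = 10 * time.Second
		cache.Update = func() (interface{}, error) {
			return "usage-snapshot", nil // stand-in for loadDataUsageFromBackend
		}
	})
	v, _ := cache.Get()
	fmt.Println(v)
}
```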
// If etcd, dns federation configured list buckets from etcd.
var buckets []BucketInfo
var err error
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
@@ -1285,7 +1324,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{})
buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{Cached: true})
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1298,14 +1337,14 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
accountName = cred.ParentUser
}

roleArn := iampolicy.Args{Claims: cred.Claims}.GetRoleArn()
policySetFromClaims, hasPolicyClaim := iampolicy.GetPoliciesFromClaims(cred.Claims, iamPolicyClaimNameOpenID())
var effectivePolicy iampolicy.Policy
roleArn := policy.Args{Claims: cred.Claims}.GetRoleArn()
policySetFromClaims, hasPolicyClaim := policy.GetPoliciesFromClaims(cred.Claims, iamPolicyClaimNameOpenID())
var effectivePolicy policy.Policy

var buf []byte
switch {
case accountName == globalActiveCred.AccessKey:
for _, policy := range iampolicy.DefaultPolicies {
for _, policy := range policy.DefaultPolicies {
if policy.Name == "consoleAdmin" {
effectivePolicy = policy.Definition
break
@@ -1325,7 +1364,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
effectivePolicy = globalIAMSys.GetCombinedPolicy(policySetFromClaims.ToSlice()...)

default:
policies, err := globalIAMSys.PolicyDBGet(accountName, false, cred.Groups...)
policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1417,7 +1456,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
@@ -1463,7 +1502,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
@@ -1475,7 +1514,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
return
}

newPolicies := make(map[string]iampolicy.Policy)
newPolicies := make(map[string]policy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
@@ -1494,7 +1533,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
@@ -1505,7 +1544,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
return
}

newPolicies := make(map[string]iampolicy.Policy)
newPolicies := make(map[string]policy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
@@ -1524,7 +1563,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DeletePolicyAdminAction)
if objectAPI == nil {
return
}
@@ -1550,7 +1589,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.CreatePolicyAdminAction)
if objectAPI == nil {
return
}
@@ -1582,7 +1621,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
return
}

iamPolicy, err := iampolicy.ParseConfig(bytes.NewReader(iamPolicyBytes))
iamPolicy, err := policy.ParseConfig(bytes.NewReader(iamPolicyBytes))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1614,7 +1653,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.AttachPolicyAdminAction)
if objectAPI == nil {
return
}
@@ -1688,7 +1727,7 @@ func (a adminAPIHandlers) ListPolicyMappingEntities(w http.ResponseWriter, r *ht

// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r,
iampolicy.ListGroupsAdminAction, iampolicy.ListUsersAdminAction, iampolicy.ListUserPoliciesAdminAction)
policy.ListGroupsAdminAction, policy.ListUsersAdminAction, policy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
@@ -1726,8 +1765,8 @@ func (a adminAPIHandlers) ListPolicyMappingEntities(w http.ResponseWriter, r *ht
func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.UpdatePolicyAssociationAction,
iampolicy.AttachPolicyAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.UpdatePolicyAssociationAction,
policy.AttachPolicyAdminAction)
if objectAPI == nil {
return
}
@@ -1826,7 +1865,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportIAMAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ExportIAMAction)
if objectAPI == nil {
return
}
@@ -2096,7 +2135,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
default:
defer f.Close()
var allPolicies map[string]iampolicy.Policy
var allPolicies map[string]policy.Policy
data, err = io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allPoliciesFile, ""), r.URL)
@@ -2177,10 +2216,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
checkDenyOnly = true
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
Action: policy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
@@ -2257,10 +2296,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
for user, svcAcctReq := range serviceAcctReqs {
var sp *iampolicy.Policy
var sp *policy.Policy
var err error
if len(svcAcctReq.SessionPolicy) > 0 {
sp, err = iampolicy.ParseConfig(bytes.NewReader(svcAcctReq.SessionPolicy))
sp, err = policy.ParseConfig(bytes.NewReader(svcAcctReq.SessionPolicy))
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
@@ -2271,10 +2310,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.CreateServiceAccountAdminAction,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
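Note: a large share of the hunks in this file rewrite globalIAMSys.IsAllowed(policy.Args{...}) checks. The sketch below shows the general "build an argument struct, ask an authorizer" shape of those call sites in a fully self-contained way; the args and authorizer types here are simplified stand-ins, not the github.com/minio/pkg/v2/policy API or MinIO's IAM subsystem.

```go
package main

import "fmt"

// args is a stripped-down stand-in for the policy.Args literals seen above.
type args struct {
	AccountName string
	Groups      []string
	Action      string
	IsOwner     bool
}

// authorizer stands in for the globalIAMSys.IsAllowed call sites: a request
// is allowed when the caller owns the deployment or the action is explicitly
// granted to the account (a deliberately simplified rule).
type authorizer struct {
	granted map[string]map[string]bool // account -> action -> allowed
}

func (a authorizer) IsAllowed(ar args) bool {
	if ar.IsOwner {
		return true
	}
	return a.granted[ar.AccountName][ar.Action]
}

func main() {
	auth := authorizer{granted: map[string]map[string]bool{
		"svc-admin": {"admin:CreateUser": true}, // action name is illustrative
	}}
	ok := auth.IsAllowed(args{
		AccountName: "svc-admin",
		Action:      "admin:CreateUser",
	})
	fmt.Println("allowed:", ok)
}
```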
@@ -52,18 +52,22 @@ import (
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
iampolicy "github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy"
"github.com/secure-io/sio-go"
"github.com/zeebo/xxh3"
)

const (
maxEConfigJSONSize = 262272
kubernetesVersionEndpoint = "https://kubernetes.default.svc/version"
anonymizeParam = "anonymize"
anonymizeStrict = "strict"
)

// Only valid query params for mgmt admin APIs.
@@ -81,7 +85,7 @@ const (
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -252,11 +256,11 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
var objectAPI ObjectLayer
switch serviceSig {
case serviceRestart:
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceRestartAdminAction)
case serviceStop:
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceStopAdminAction)
case serviceFreeze, serviceUnFreeze:
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceFreezeAdminAction)
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceFreezeAdminAction)
}
if objectAPI == nil {
return
@@ -331,7 +335,7 @@ type ServerHTTPStats struct {
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.StorageInfoAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.StorageInfoAdminAction)
if objectAPI == nil {
return
}
@@ -370,7 +374,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction)
if objectAPI == nil {
return
}
@@ -479,7 +483,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DataUsageInfoAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DataUsageInfoAdminAction)
if objectAPI == nil {
return
}
@@ -562,7 +566,7 @@ type PeerLocks struct {
func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ForceUnlockAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ForceUnlockAdminAction)
if objectAPI == nil {
return
}
@@ -597,7 +601,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.TopLocksAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.TopLocksAdminAction)
if objectAPI == nil {
return
}
@@ -648,7 +652,7 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
ctx := r.Context()

// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ProfilingAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return
@@ -733,7 +737,7 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
ctx := r.Context()

// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ProfilingAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return
@@ -754,11 +758,8 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
return
}
}
// read request body
io.CopyN(io.Discard, r.Body, 1)

globalProfilerMu.Lock()

if globalProfiler == nil {
globalProfiler = make(map[string]minioProfiler, 10)
}
@@ -828,7 +829,7 @@ func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *htt
ctx := r.Context()

// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ProfilingAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return
@@ -926,7 +927,7 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealAdminAction)
if objectAPI == nil {
return
}
@@ -1001,7 +1002,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
Message: hr.errBody,
Resource: r.URL.Path,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
HostID: globalDeploymentID(),
})
}
if !started {
@@ -1056,7 +1057,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
return
}

respCh := make(chan healResp)
respCh := make(chan healResp, 1)
switch {
case hip.forceStop:
go func() {
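Note: the hunk above gives respCh a capacity of 1, presumably so the goroutine producing the heal response can deposit its result and exit even when the handler has already stopped receiving. A generic, self-contained illustration of why a buffered channel avoids that blocked-sender situation (this is not MinIO code; healResp is replaced by a trivial result type):

```go
package main

import (
	"fmt"
	"time"
)

type result struct{ msg string }

// With an unbuffered channel, the send blocks until a receiver is ready, so a
// receiver that gives up (timeout, client disconnect) leaves the sender stuck.
// With capacity 1, as in `make(chan healResp, 1)`, the single producer can
// always complete its send and return.
func main() {
	respCh := make(chan result, 1)

	go func() {
		time.Sleep(50 * time.Millisecond) // simulate a slow heal sequence
		respCh <- result{msg: "heal finished"}
	}()

	select {
	case r := <-respCh:
		fmt.Println(r.msg)
	case <-time.After(10 * time.Millisecond):
		// Receiver bails out; the producer can still complete its buffered send.
		fmt.Println("timed out waiting for heal response")
	}
}
```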
@@ -1110,7 +1111,7 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi
|
||||
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1131,7 +1132,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
|
||||
func (a adminAPIHandlers) SitePerfHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1179,7 +1180,7 @@ func (a adminAPIHandlers) SitePerfHandler(w http.ResponseWriter, r *http.Request
|
||||
func (a adminAPIHandlers) ClientDevNullExtraTime(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.BandwidthMonitorAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.BandwidthMonitorAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1196,7 +1197,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
|
||||
ctx := r.Context()
|
||||
|
||||
timeStart := time.Now()
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.BandwidthMonitorAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.BandwidthMonitorAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1218,7 +1219,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
|
||||
totalRx := int64(0)
|
||||
connectTime := time.Now()
|
||||
for {
|
||||
n, err := io.CopyN(io.Discard, r.Body, 128*humanize.KiByte)
|
||||
n, err := io.CopyN(xioutil.Discard, r.Body, 128*humanize.KiByte)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
// would mean the network is not stable. Logging here will help in debugging network issues.
|
||||
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
|
||||
@@ -1238,7 +1239,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
|
||||
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1284,7 +1285,7 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
|
||||
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1400,13 +1401,21 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
|
||||
func makeObjectPerfBucket(ctx context.Context, objectAPI ObjectLayer, bucketName string) (bucketExists bool, err error) {
|
||||
if err = objectAPI.MakeBucket(ctx, bucketName, MakeBucketOptions{}); err != nil {
|
||||
if err = objectAPI.MakeBucket(ctx, bucketName, MakeBucketOptions{VersioningEnabled: globalSiteReplicationSys.isEnabled()}); err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
// Only BucketExists error can be ignored.
|
||||
return false, err
|
||||
}
|
||||
bucketExists = true
|
||||
}
|
||||
|
||||
if globalSiteReplicationSys.isEnabled() {
|
||||
configData := []byte(`<VersioningConfiguration><Status>Enabled</Status><ExcludedPrefixes><Prefix>speedtest/*</Prefix></ExcludedPrefixes></VersioningConfiguration>`)
|
||||
if _, err = globalBucketMetadataSys.Update(ctx, bucketName, bucketVersioningConfig, configData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return bucketExists, nil
|
||||
}
|
||||
|
||||
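With site replication enabled, the speedtest bucket is created with versioning on but the `speedtest/*` prefix excluded, so transient benchmark objects do not accumulate versions. The sketch below only illustrates the shape of the XML that makeObjectPerfBucket applies; the stand-in types use `encoding/xml` and are not MinIO's internal `versioning` package.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Minimal stand-in types for the versioning configuration; MinIO's real
// types live in internal/bucket/versioning.
type ExcludedPrefix struct {
	Prefix string
}

type VersioningConfiguration struct {
	XMLName          xml.Name         `xml:"VersioningConfiguration"`
	Status           string           `xml:"Status"`
	ExcludedPrefixes []ExcludedPrefix `xml:"ExcludedPrefixes,omitempty"`
}

func main() {
	cfg := VersioningConfiguration{
		Status:           "Enabled",
		ExcludedPrefixes: []ExcludedPrefix{{Prefix: "speedtest/*"}},
	}
	out, err := xml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Prints XML equivalent to the configData literal used above.
	fmt.Println(string(out))
}
```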
@@ -1445,7 +1454,7 @@ func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo,
|
||||
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1566,7 +1575,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
// Validate request signature.
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.TraceAdminAction, "")
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.TraceAdminAction, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return
|
||||
@@ -1581,7 +1590,9 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Trace Publisher and peer-trace-client use nonblocking sends and hence do not wait for slow receivers.
|
||||
// Use buffered channel to take care of burst sends or slow w.Write()
|
||||
traceCh := make(chan madmin.TraceInfo, 4000)
|
||||
|
||||
// Keep a 100k-entry buffered channel; this should be sufficient to ensure we do not lose any events.
|
||||
traceCh := make(chan madmin.TraceInfo, 100000)
|
||||
|
||||
peers, _ := newPeerRestClients(globalEndpoints)
|
||||
|
||||
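The trace publisher uses a non-blocking send, so an event that cannot be buffered immediately is dropped rather than stalling the producer; growing the channel from 4,000 to 100,000 entries simply makes drops less likely when `w.Write()` is slow. A small sketch of the non-blocking-send pattern, with illustrative names that are not MinIO's pubsub implementation:

```go
package main

import "fmt"

type traceEvent struct{ id int }

// publish attempts to enqueue an event without ever blocking the caller.
// It reports whether the event was accepted or dropped.
func publish(ch chan<- traceEvent, ev traceEvent) bool {
	select {
	case ch <- ev:
		return true
	default:
		return false // receiver too slow and buffer full: drop the event
	}
}

func main() {
	// A larger buffer absorbs bursts; events are only lost once it is full.
	ch := make(chan traceEvent, 3)
	for i := 0; i < 5; i++ {
		if !publish(ch, traceEvent{id: i}) {
			fmt.Println("dropped event", i)
		}
	}
	close(ch)
	for ev := range ch {
		fmt.Println("delivered event", ev.id)
	}
}
```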
@@ -1605,7 +1616,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
|
||||
peer.Trace(traceCh, ctx.Done(), traceOpts)
|
||||
}
|
||||
|
||||
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
||||
keepAliveTicker := time.NewTicker(time.Second)
|
||||
defer keepAliveTicker.Stop()
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
@@ -1637,7 +1648,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConsoleLogAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConsoleLogAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1718,7 +1729,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
|
||||
func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSCreateKeyAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSCreateKeyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1739,7 +1750,7 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
|
||||
func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSKeyStatusAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1776,7 +1787,7 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques
|
||||
func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSKeyStatusAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1980,7 +1991,7 @@ func getServerInfo(ctx context.Context, poolsInfoEnabled bool, r *http.Request)
|
||||
Domain: domain,
|
||||
Region: globalSite.Region,
|
||||
SQSARN: globalEventNotifier.GetARNList(false),
|
||||
DeploymentID: globalDeploymentID,
|
||||
DeploymentID: globalDeploymentID(),
|
||||
Buckets: buckets,
|
||||
Objects: objects,
|
||||
Versions: versions,
|
||||
@@ -2025,8 +2036,14 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
|
||||
|
||||
func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *url.Values, healthInfoCh chan madmin.HealthInfo, healthInfo madmin.HealthInfo) {
|
||||
hostAnonymizer := createHostAnonymizer()
|
||||
// anonAddr - Anonymizes hosts in given input string.
|
||||
|
||||
anonParam := query.Get(anonymizeParam)
|
||||
// anonAddr - Anonymizes hosts in given input string
|
||||
// (only if the anonymize param is set to strict).
|
||||
anonAddr := func(addr string) string {
|
||||
if anonParam != anonymizeStrict {
|
||||
return addr
|
||||
}
|
||||
newAddr, found := hostAnonymizer[addr]
|
||||
if found {
|
||||
return newAddr
|
||||
@@ -2087,6 +2104,20 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
|
||||
}
|
||||
}
|
||||
|
||||
getAndWriteNetInfo := func() {
|
||||
if query.Get(string(madmin.HealthDataTypeSysNet)) == "true" {
|
||||
localNetInfo := madmin.GetNetInfo(globalLocalNodeName, globalInternodeInterface)
|
||||
healthInfo.Sys.NetInfo = append(healthInfo.Sys.NetInfo, localNetInfo)
|
||||
|
||||
peerNetInfos := globalNotificationSys.GetNetInfo(healthCtx)
|
||||
for _, n := range peerNetInfos {
|
||||
anonymizeAddr(&n)
|
||||
healthInfo.Sys.NetInfo = append(healthInfo.Sys.NetInfo, n)
|
||||
}
|
||||
partialWrite(healthInfo)
|
||||
}
|
||||
}
|
||||
|
||||
getAndWriteOSInfo := func() {
|
||||
if query.Get("sysosinfo") == "true" {
|
||||
localOSInfo := madmin.GetOSInfo(healthCtx, globalLocalNodeName)
|
||||
@@ -2187,6 +2218,10 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
|
||||
}
|
||||
|
||||
anonymizeCmdLine := func(cmdLine string) string {
|
||||
if anonParam != anonymizeStrict {
|
||||
return cmdLine
|
||||
}
|
||||
|
||||
if !globalIsDistErasure {
|
||||
// FS mode - single server - hard code to `server1`
|
||||
anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
|
||||
@@ -2308,6 +2343,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
|
||||
getAndWritePlatformInfo()
|
||||
getAndWriteCPUs()
|
||||
getAndWritePartitions()
|
||||
getAndWriteNetInfo()
|
||||
getAndWriteOSInfo()
|
||||
getAndWriteMemInfo()
|
||||
getAndWriteProcInfo()
|
||||
@@ -2376,7 +2412,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
|
||||
func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -2390,7 +2426,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
Version: madmin.HealthInfoVersion,
|
||||
Minio: madmin.MinioHealthInfo{
|
||||
Info: madmin.MinioInfo{
|
||||
DeploymentID: globalDeploymentID,
|
||||
DeploymentID: globalDeploymentID(),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -2465,11 +2501,27 @@ func getTLSInfo() madmin.TLSInfo {
|
||||
|
||||
if globalIsTLS {
|
||||
for _, c := range globalPublicCerts {
|
||||
check := xxh3.Hash(c.RawIssuer)
|
||||
check ^= xxh3.Hash(c.RawSubjectPublicKeyInfo)
|
||||
// We XOR, so order doesn't matter.
|
||||
for _, v := range c.DNSNames {
|
||||
check ^= xxh3.HashString(v)
|
||||
}
|
||||
for _, v := range c.EmailAddresses {
|
||||
check ^= xxh3.HashString(v)
|
||||
}
|
||||
for _, v := range c.IPAddresses {
|
||||
check ^= xxh3.HashString(v.String())
|
||||
}
|
||||
for _, v := range c.URIs {
|
||||
check ^= xxh3.HashString(v.String())
|
||||
}
|
||||
tlsInfo.Certs = append(tlsInfo.Certs, madmin.TLSCert{
|
||||
PubKeyAlgo: c.PublicKeyAlgorithm.String(),
|
||||
SignatureAlgo: c.SignatureAlgorithm.String(),
|
||||
NotBefore: c.NotBefore,
|
||||
NotAfter: c.NotAfter,
|
||||
Checksum: strconv.FormatUint(check, 16),
|
||||
})
|
||||
}
|
||||
}
|
||||
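Because XOR is commutative and associative, combining the xxh3 hashes of the issuer, public key, and every SAN with `^=` yields a certificate checksum that does not depend on the order in which those fields are visited. A standalone sketch of the same idea, assuming the xxh3 package used above is github.com/zeebo/xxh3 and using plain strings instead of an x509 certificate:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/zeebo/xxh3"
)

// combine XORs the xxh3 hashes of all fields. Since XOR is order-independent,
// shuffling the inputs produces the same checksum.
func combine(fields []string) uint64 {
	var check uint64
	for _, f := range fields {
		check ^= xxh3.HashString(f)
	}
	return check
}

func main() {
	a := combine([]string{"issuer", "pubkey", "example.com", "admin@example.com"})
	b := combine([]string{"admin@example.com", "example.com", "pubkey", "issuer"})
	fmt.Println(strconv.FormatUint(a, 16) == strconv.FormatUint(b, 16)) // true
}
```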
@@ -2483,7 +2535,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
ctx := r.Context()
|
||||
|
||||
// Validate request signature.
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ServerInfoAdminAction, "")
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ServerInfoAdminAction, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return
|
||||
@@ -2520,7 +2572,7 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
|
||||
func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
|
||||
lambdaMap := make(map[string][]madmin.TargetIDStatus)
|
||||
|
||||
for _, tgt := range globalNotifyTargetList.Targets() {
|
||||
for _, tgt := range globalEventNotifier.Targets() {
|
||||
targetIDStatus := make(map[string]madmin.Status)
|
||||
active, _ := tgt.IsActive()
|
||||
targetID := tgt.ID()
|
||||
@@ -2688,7 +2740,7 @@ func getClusterMetaInfo(ctx context.Context) []byte {
|
||||
ci.Info.NoOfBuckets = dataUsageInfo.BucketsCount
|
||||
ci.Info.NoOfObjects = dataUsageInfo.ObjectsTotalCount
|
||||
|
||||
ci.DeploymentID = globalDeploymentID
|
||||
ci.DeploymentID = globalDeploymentID()
|
||||
ci.ClusterName = fmt.Sprintf("%d-servers-%d-disks-%s", ci.Info.NoOfServers, ci.Info.NoOfDrives, ci.Info.MinioVersion)
|
||||
|
||||
select {
|
||||
@@ -2735,7 +2787,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
||||
ctx := r.Context()
|
||||
|
||||
// Validate request signature.
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.InspectDataAction, "")
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.InspectDataAction, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return
|
||||
@@ -2929,9 +2981,14 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
||||
sb.WriteString("\n")
|
||||
logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
|
||||
|
||||
scheme := "https"
|
||||
if !globalIsTLS {
|
||||
scheme = "http"
|
||||
}
|
||||
|
||||
// save MinIO start script to inspect command
|
||||
var scrb bytes.Buffer
|
||||
scrb.WriteString(`#!/usr/bin/env bash
|
||||
fmt.Fprintf(&scrb, `#!/usr/bin/env bash
|
||||
|
||||
function main() {
|
||||
for file in $(ls -1); do
|
||||
@@ -2940,10 +2997,10 @@ function main() {
|
||||
done
|
||||
|
||||
# Read content of inspect-input.txt
|
||||
MINIO_OPTS=$(grep "Server command line args" <./inspect-input.txt | sed "s/Server command line args: //g" | sed -r "s#https:\/\/#\.\/#g")
|
||||
MINIO_OPTS=$(grep "Server command line args" <./inspect-input.txt | sed "s/Server command line args: //g" | sed -r "s#%s:\/\/#\.\/#g")
|
||||
|
||||
# Start MinIO instance using the options
|
||||
START_CMD="CI=on _MINIO_AUTO_DISK_HEALING=off minio server ${MINIO_OPTS} &"
|
||||
START_CMD="CI=on _MINIO_AUTO_DRIVE_HEALING=off minio server ${MINIO_OPTS} &"
|
||||
echo
|
||||
echo "Starting MinIO instance: ${START_CMD}"
|
||||
echo
|
||||
@@ -2955,8 +3012,7 @@ function main() {
|
||||
sleep 10
|
||||
}
|
||||
|
||||
main "$@"`,
|
||||
)
|
||||
main "$@"`, scheme)
|
||||
logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
|
||||
}
|
||||
|
||||
|
||||
@@ -683,13 +683,6 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *healSequence) logHeal(healType madmin.HealItemType) {
|
||||
h.mutex.Lock()
|
||||
h.scannedItemsMap[healType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
h.mutex.Unlock()
|
||||
}
|
||||
|
||||
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
|
||||
// Send heal request
|
||||
task := healTask{
|
||||
|
||||
@@ -376,6 +376,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(adminMiddleware(adminAPI.SRPeerEdit))
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(adminMiddleware(adminAPI.SRPeerRemove))
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/resync/op").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationResyncOp)).Queries("operation", "{operation:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/state/edit").HandlerFunc(adminMiddleware(adminAPI.SRStateEdit))
|
||||
|
||||
if globalIsDistErasure {
|
||||
// Top locks
|
||||
|
||||
@@ -310,6 +310,7 @@ const (
|
||||
ErrSiteReplicationBucketMetaError
|
||||
ErrSiteReplicationIAMError
|
||||
ErrSiteReplicationConfigMissing
|
||||
ErrSiteReplicationIAMConfigMismatch
|
||||
|
||||
// Pool rebalance errors
|
||||
ErrAdminRebalanceAlreadyStarted
|
||||
@@ -1512,6 +1513,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Site not found in site replication configuration",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrSiteReplicationIAMConfigMismatch: {
|
||||
Code: "XMinioSiteReplicationIAMConfigMismatch",
|
||||
Description: "IAM configuration mismatch between sites",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminRebalanceAlreadyStarted: {
|
||||
Code: "XMinioAdminRebalanceAlreadyStarted",
|
||||
Description: "Pool rebalance is already started",
|
||||
@@ -2236,8 +2242,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrSlowDownWrite
|
||||
case InsufficientReadQuorum:
|
||||
apiErr = ErrSlowDownRead
|
||||
case InvalidMarkerPrefixCombination:
|
||||
apiErr = ErrNotImplemented
|
||||
case InvalidUploadIDKeyCombination:
|
||||
apiErr = ErrNotImplemented
|
||||
case MalformedUploadID:
|
||||
@@ -2339,6 +2343,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
case dns.ErrBucketConflict:
|
||||
apiErr = ErrBucketAlreadyExists
|
||||
default:
|
||||
if _, ok := err.(tags.Error); ok {
|
||||
// tag errors are not exported, so we check their custom interface to avoid logging.
|
||||
// The correct type is inserted by toAPIError.
|
||||
apiErr = ErrInternalError
|
||||
break
|
||||
}
|
||||
var ie, iw int
|
||||
// This work-around is to handle the issue golang/go#30648
|
||||
//nolint:gocritic
|
||||
|
||||
@@ -42,7 +42,6 @@ var toAPIErrorTests = []struct {
|
||||
{err: InvalidPart{}, errCode: ErrInvalidPart},
|
||||
{err: InsufficientReadQuorum{}, errCode: ErrSlowDownRead},
|
||||
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDownWrite},
|
||||
{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
|
||||
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
|
||||
{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},
|
||||
{err: PartTooSmall{}, errCode: ErrEntityTooSmall},
|
||||
|
||||
@@ -133,11 +133,6 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
if globalCacheConfig.Enabled {
|
||||
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
|
||||
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
|
||||
}
|
||||
|
||||
// Set tag count if object has tags
|
||||
if len(objInfo.UserTags) > 0 {
|
||||
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
|
||||
|
||||
@@ -502,6 +502,47 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
return data
|
||||
}
|
||||
|
||||
func cleanReservedKeys(metadata map[string]string) map[string]string {
|
||||
m := cloneMSS(metadata)
|
||||
|
||||
switch kind, _ := crypto.IsEncrypted(metadata); kind {
|
||||
case crypto.S3:
|
||||
m[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
|
||||
case crypto.S3KMS:
|
||||
m[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
|
||||
m[xhttp.AmzServerSideEncryptionKmsID] = kmsKeyIDFromMetadata(metadata)
|
||||
if kmsCtx, ok := metadata[crypto.MetaContext]; ok {
|
||||
m[xhttp.AmzServerSideEncryptionKmsContext] = kmsCtx
|
||||
}
|
||||
case crypto.SSEC:
|
||||
m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
|
||||
|
||||
}
|
||||
|
||||
var toRemove []string
|
||||
for k := range cleanMinioInternalMetadataKeys(m) {
|
||||
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
toRemove = append(toRemove, k)
|
||||
continue
|
||||
}
|
||||
|
||||
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
toRemove = append(toRemove, k)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, k := range toRemove {
|
||||
delete(m, k)
|
||||
delete(m, strings.ToLower(k))
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
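cleanReservedKeys centralizes the per-object metadata scrubbing that was previously inlined in each listing response generator: it re-adds the user-visible SSE headers and then drops anything under the reserved MinIO prefix, plus the two headers covered by GHSA-76wf-9vgp-pj7w. A hedged, standalone sketch of the same filtering idea; the helper and the prefix constant here are hypothetical stand-ins, and MinIO's real implementation also performs the SSE header mapping shown above.

```go
package main

import (
	"fmt"
	"strings"
)

const reservedPrefix = "x-minio-internal-" // illustrative stand-in for ReservedMetadataPrefixLower

// scrubUserMetadata returns a copy of metadata with internal keys removed.
func scrubUserMetadata(metadata map[string]string) map[string]string {
	out := make(map[string]string, len(metadata))
	for k, v := range metadata {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, reservedPrefix) {
			continue // never leak internal bookkeeping keys to clients
		}
		if lk == "x-amz-meta-x-amz-unencrypted-content-length" ||
			lk == "x-amz-meta-x-amz-unencrypted-content-md5" {
			continue // see GHSA-76wf-9vgp-pj7w
		}
		out[k] = v
	}
	return out
}

func main() {
	in := map[string]string{
		"X-Amz-Meta-Color":             "blue",
		"x-minio-internal-replication": "pending",
	}
	fmt.Println(scrubUserMetadata(in)) // map[X-Amz-Meta-Color:blue]
}
```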
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
|
||||
versions := make([]ObjectVersion, 0, len(resp.Objects))
|
||||
@@ -549,18 +590,11 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
case crypto.SSEC:
|
||||
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
|
||||
}
|
||||
for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
|
||||
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
continue
|
||||
}
|
||||
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
continue
|
||||
}
|
||||
for k, v := range cleanReservedKeys(object.UserDefined) {
|
||||
content.UserMetadata.Set(k, v)
|
||||
}
|
||||
|
||||
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
|
||||
content.Internal = &ObjectInternalInfo{
|
||||
K: object.DataBlocks,
|
||||
M: object.ParityBlocks,
|
||||
@@ -692,18 +726,10 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
case crypto.SSEC:
|
||||
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
|
||||
}
|
||||
for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
|
||||
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
continue
|
||||
}
|
||||
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
continue
|
||||
}
|
||||
for k, v := range cleanReservedKeys(object.UserDefined) {
|
||||
content.UserMetadata.Set(k, v)
|
||||
}
|
||||
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
|
||||
content.Internal = &ObjectInternalInfo{
|
||||
K: object.DataBlocks,
|
||||
M: object.ParityBlocks,
|
||||
@@ -978,7 +1004,7 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
RequestID: w.Header().Get(xhttp.AmzRequestID),
|
||||
HostID: globalDeploymentID,
|
||||
HostID: globalDeploymentID(),
|
||||
}
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
|
||||
@@ -61,18 +61,6 @@ func newObjectLayerFn() ObjectLayer {
|
||||
return globalObjectAPI
|
||||
}
|
||||
|
||||
func newCachedObjectLayerFn() CacheObjectLayer {
|
||||
globalObjLayerMutex.RLock()
|
||||
defer globalObjLayerMutex.RUnlock()
|
||||
return globalCacheObjectAPI
|
||||
}
|
||||
|
||||
func setCacheObjectLayer(c CacheObjectLayer) {
|
||||
globalObjLayerMutex.Lock()
|
||||
globalCacheObjectAPI = c
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
||||
|
||||
func setObjectLayer(o ObjectLayer) {
|
||||
globalObjLayerMutex.Lock()
|
||||
globalObjectAPI = o
|
||||
@@ -82,7 +70,6 @@ func setObjectLayer(o ObjectLayer) {
|
||||
// objectAPIHandler implements and provides http handlers for S3 API.
|
||||
type objectAPIHandlers struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
CacheAPI func() CacheObjectLayer
|
||||
}
|
||||
|
||||
// getHost tries its best to return the request host.
|
||||
@@ -189,7 +176,6 @@ func registerAPIRouter(router *mux.Router) {
|
||||
// Initialize API.
|
||||
api := objectAPIHandlers{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
CacheAPI: newCachedObjectLayerFn,
|
||||
}
|
||||
|
||||
// API Router
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -567,7 +567,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
|
||||
|
||||
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
|
||||
// The verification happens implicit during reading.
|
||||
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
|
||||
reader, err := hash.NewReader(ctx, r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
|
||||
if err != nil {
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/auth"
|
||||
iampolicy "github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
)
|
||||
|
||||
type nullReader struct{}
|
||||
@@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
|
||||
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
|
||||
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +232,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
|
||||
pathJoin(bucketMetaPrefix, healingTrackerFilename),
|
||||
DeleteOptions{
|
||||
Recursive: false,
|
||||
Force: false,
|
||||
Immediate: false,
|
||||
},
|
||||
)
|
||||
}
|
||||
@@ -347,7 +347,7 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
|
||||
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
|
||||
|
||||
if env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
|
||||
if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
|
||||
go monitorLocalDisksAndHeal(ctx, z)
|
||||
}
|
||||
}
|
||||
|
||||
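The auto-heal toggle was renamed from `_MINIO_AUTO_DISK_HEALING` to `_MINIO_AUTO_DRIVE_HEALING`, and the check above honors either spelling so existing deployments keep working. The sketch below is a simplified, standard-library variant of that fallback lookup (it prefers the first variable that is explicitly set) and does not reproduce the exact semantics of `env.Get` above.

```go
package main

import (
	"fmt"
	"os"
)

// envEnabled returns true when none of the variables is set (enabled by default),
// or when the first explicitly set variable equals "on".
func envEnabled(names ...string) bool {
	for _, name := range names {
		if v, ok := os.LookupEnv(name); ok {
			return v == "on"
		}
	}
	return true // default: enabled
}

func main() {
	on := envEnabled("_MINIO_AUTO_DRIVE_HEALING", "_MINIO_AUTO_DISK_HEALING")
	fmt.Println("auto heal enabled:", on)
}
```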
cmd/batch-expire.go (new file, 677 lines)
@@ -0,0 +1,677 @@
|
||||
// Copyright (c) 2015-2023 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v2/wildcard"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
)
|
||||
|
||||
// expire: # Expire objects that match a condition
|
||||
// apiVersion: v1
|
||||
// bucket: mybucket # Bucket where this batch job will expire matching objects from
|
||||
// prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
|
||||
// rules:
|
||||
// - type: object # regular objects with zero or more older versions
|
||||
// name: NAME # match object names that satisfy the wildcard expression.
|
||||
// olderThan: 70h # match objects older than this value
|
||||
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
// tags:
|
||||
// - key: name
|
||||
// value: pick* # match objects with tag 'name', all values starting with 'pick'
|
||||
// metadata:
|
||||
// - key: content-type
|
||||
// value: image/* # match objects with 'content-type', all values starting with 'image/'
|
||||
// size:
|
||||
// lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
|
||||
// greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
|
||||
// purge:
|
||||
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
|
||||
// # retainVersions: 5 # keep the latest 5 versions of the object.
|
||||
//
|
||||
// - type: deleted # objects with delete marker as their latest version
|
||||
// name: NAME # match object names that satisfy the wildcard expression.
|
||||
// olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
|
||||
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
// purge:
|
||||
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
|
||||
// # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
|
||||
//
|
||||
// notify:
|
||||
// endpoint: https://notify.endpoint # notification endpoint to receive job completion status
|
||||
// token: Bearer xxxxx # optional authentication token for the notification endpoint
|
||||
//
|
||||
// retry:
|
||||
// attempts: 10 # number of retries for the job before giving up
|
||||
// delay: 500ms # least amount of delay between each retry
|
||||
|
||||
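The comment block above documents the YAML shape of a batch-expire job definition. As a rough illustration of how such a definition maps onto the types defined below, the following sketch unmarshals a small job with gopkg.in/yaml.v3; the structs here are trimmed, hypothetical stand-ins rather than the full BatchJobExpire type, and olderThan is kept as a string (the real filter parses it into a time.Duration).

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Trimmed-down stand-ins for the batch-expire types; field names follow the
// yaml tags documented above, but this is not the full BatchJobExpire struct.
type expireRule struct {
	Type      string `yaml:"type"`
	Name      string `yaml:"name"`
	OlderThan string `yaml:"olderThan"` // parsed as a duration by the real filter
}

type expireJob struct {
	APIVersion string       `yaml:"apiVersion"`
	Bucket     string       `yaml:"bucket"`
	Prefix     string       `yaml:"prefix"`
	Rules      []expireRule `yaml:"rules"`
}

const sampleJob = `
apiVersion: v1
bucket: mybucket
prefix: myprefix
rules:
  - type: object
    name: "pick*"
    olderThan: 70h
`

func main() {
	var job expireJob
	if err := yaml.Unmarshal([]byte(sampleJob), &job); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", job)
}
```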
//go:generate msgp -file $GOFILE
|
||||
|
||||
// BatchJobExpirePurge type accepts non-negative versions to be retained
|
||||
type BatchJobExpirePurge struct {
|
||||
RetainVersions int `yaml:"retainVersions" json:"retainVersions"`
|
||||
}
|
||||
|
||||
// Validate returns nil if the value is valid, i.e. >= 0.
|
||||
func (p BatchJobExpirePurge) Validate() error {
|
||||
if p.RetainVersions < 0 {
|
||||
return errors.New("retainVersions must be >= 0")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchJobExpireFilter holds all the filters currently supported for batch expiration
|
||||
type BatchJobExpireFilter struct {
|
||||
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
|
||||
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
|
||||
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
|
||||
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
|
||||
Size BatchJobSizeFilter `yaml:"size" json:"size"`
|
||||
Type string `yaml:"type" json:"type"`
|
||||
Name string `yaml:"name" json:"name"`
|
||||
Purge BatchJobExpirePurge `yaml:"purge" json:"purge"`
|
||||
}
|
||||
|
||||
// Matches returns true if obj matches the filter conditions specified in ef.
|
||||
func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
|
||||
switch ef.Type {
|
||||
case BatchJobExpireObject:
|
||||
if obj.DeleteMarker {
|
||||
return false
|
||||
}
|
||||
case BatchJobExpireDeleted:
|
||||
if !obj.DeleteMarker {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
// we should never come here, Validate should have caught this.
|
||||
logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
|
||||
return false
|
||||
}
|
||||
|
||||
if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
|
||||
return false
|
||||
}
|
||||
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
|
||||
return false
|
||||
}
|
||||
|
||||
if ef.CreatedBefore != nil && !obj.ModTime.Before(*ef.CreatedBefore) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(ef.Tags) > 0 && !obj.DeleteMarker {
|
||||
// Only parse object tags if tags filter is specified.
|
||||
var tagMap map[string]string
|
||||
if len(obj.UserTags) != 0 {
|
||||
t, err := tags.ParseObjectTags(obj.UserTags)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
tagMap = t.ToMap()
|
||||
}
|
||||
|
||||
for _, kv := range ef.Tags {
|
||||
// Object (version) must match all tags specified in
|
||||
// the filter
|
||||
var match bool
|
||||
for t, v := range tagMap {
|
||||
if kv.Match(BatchJobKV{Key: t, Value: v}) {
|
||||
match = true
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if len(ef.Metadata) > 0 && !obj.DeleteMarker {
|
||||
for _, kv := range ef.Metadata {
|
||||
// Object (version) must match all x-amz-meta and
|
||||
// standard metadata headers
|
||||
// specified in the filter
|
||||
var match bool
|
||||
for k, v := range obj.UserDefined {
|
||||
if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
|
||||
continue
|
||||
}
|
||||
// We only need to match x-amz-meta or standardHeaders
|
||||
if kv.Match(BatchJobKV{Key: k, Value: v}) {
|
||||
match = true
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ef.Size.InRange(obj.Size)
|
||||
}
|
||||
|
||||
const (
|
||||
// BatchJobExpireObject - object type
|
||||
BatchJobExpireObject string = "object"
|
||||
// BatchJobExpireDeleted - delete marker type
|
||||
BatchJobExpireDeleted string = "deleted"
|
||||
)
|
||||
|
||||
// Validate returns nil if ef has valid fields, validation error otherwise.
|
||||
func (ef BatchJobExpireFilter) Validate() error {
|
||||
switch ef.Type {
|
||||
case BatchJobExpireObject:
|
||||
case BatchJobExpireDeleted:
|
||||
if len(ef.Tags) > 0 || len(ef.Metadata) > 0 {
|
||||
return errors.New("invalid batch-expire rule filter")
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid batch-expire type")
|
||||
}
|
||||
|
||||
for _, tag := range ef.Tags {
|
||||
if err := tag.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, meta := range ef.Metadata {
|
||||
if err := meta.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := ef.Purge.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ef.Size.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if ef.CreatedBefore != nil && !ef.CreatedBefore.Before(time.Now()) {
|
||||
return errors.New("CreatedBefore is in the future")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchJobExpire represents configuration parameters for a batch expiration
|
||||
// job typically supplied in yaml form
|
||||
type BatchJobExpire struct {
|
||||
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
|
||||
Bucket string `yaml:"bucket" json:"bucket"`
|
||||
Prefix string `yaml:"prefix" json:"prefix"`
|
||||
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
|
||||
Retry BatchJobRetry `yaml:"retry" json:"retry"`
|
||||
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
|
||||
}
|
||||
|
||||
// Notify notifies notification endpoint if configured regarding job failure or success.
|
||||
func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
|
||||
if r.NotificationCfg.Endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.NotificationCfg.Endpoint, body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.NotificationCfg.Token != "" {
|
||||
req.Header.Set("Authorization", r.NotificationCfg.Token)
|
||||
}
|
||||
|
||||
clnt := http.Client{Transport: getRemoteInstanceTransport}
|
||||
resp, err := clnt.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xhttp.DrainBody(resp.Body)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return errors.New(resp.Status)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
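Notify POSTs the final job state as JSON to the configured endpoint, sends the configured token in the Authorization header when present, and treats any status other than 200 as a failure. A hedged sketch of a minimal receiver for such callbacks; the shared secret and route are illustrative, and the payload schema is simply whatever the job state marshals to, so it is decoded loosely here.

```go
package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/batch-notify", func(w http.ResponseWriter, r *http.Request) {
		// Optional shared-secret check, matching the job's notify.token setting.
		if r.Header.Get("Authorization") != "Bearer my-secret" {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "bad body", http.StatusBadRequest)
			return
		}
		// Decode loosely; the exact schema is the marshalled job state.
		var payload map[string]any
		if err := json.Unmarshal(body, &payload); err != nil {
			http.Error(w, "bad json", http.StatusBadRequest)
			return
		}
		log.Printf("batch job finished: %v", payload)
		w.WriteHeader(http.StatusOK) // anything else is treated as a notify failure
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```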
// Expire expires object versions which have already matched supplied filter conditions
|
||||
func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versioning.Versioning, objsToDel []ObjectToDelete) []error {
|
||||
opts := ObjectOptions{
|
||||
PrefixEnabledFn: vc.PrefixEnabled,
|
||||
VersionSuspended: vc.Suspended(),
|
||||
}
|
||||
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
|
||||
return errs
|
||||
}
|
||||
|
||||
const (
|
||||
batchExpireName = "batch-expire.bin"
|
||||
batchExpireFormat = 1
|
||||
batchExpireVersionV1 = 1
|
||||
batchExpireVersion = batchExpireVersionV1
|
||||
batchExpireAPIVersion = "v1"
|
||||
batchExpireJobDefaultRetries = 3
|
||||
batchExpireJobDefaultRetryDelay = 250 * time.Millisecond
|
||||
)
|
||||
|
||||
type objInfoCache map[string]*ObjectInfo
|
||||
|
||||
func newObjInfoCache() objInfoCache {
|
||||
return objInfoCache(make(map[string]*ObjectInfo))
|
||||
}
|
||||
|
||||
func (oiCache objInfoCache) Add(toDel ObjectToDelete, oi *ObjectInfo) {
|
||||
oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)] = oi
|
||||
}
|
||||
|
||||
func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {
|
||||
oi, ok := oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)]
|
||||
return oi, ok
|
||||
}
|
||||
|
||||
func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
|
||||
vc, _ := globalBucketVersioningSys.Get(r.Bucket)
|
||||
retryAttempts := r.Retry.Attempts
|
||||
delay := job.Expire.Retry.Delay
|
||||
if delay == 0 {
|
||||
delay = batchExpireJobDefaultRetryDelay
|
||||
}
|
||||
|
||||
var i int
|
||||
for toExpire := range expireCh {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
if i > 0 {
|
||||
if wait := globalBatchConfig.ExpirationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
}
|
||||
}
|
||||
i++
|
||||
wk.Take()
|
||||
go func(toExpire []expireObjInfo) {
|
||||
defer wk.Give()
|
||||
|
||||
toExpireAll := make([]ObjectInfo, 0, len(toExpire))
|
||||
toDel := make([]ObjectToDelete, 0, len(toExpire))
|
||||
oiCache := newObjInfoCache()
|
||||
for _, exp := range toExpire {
|
||||
if exp.ExpireAll {
|
||||
toExpireAll = append(toExpireAll, exp.ObjectInfo)
|
||||
continue
|
||||
}
|
||||
// Cache ObjectInfo value via pointers for
|
||||
// subsequent use to track objects which
|
||||
// couldn't be deleted.
|
||||
od := ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: exp.Name,
|
||||
VersionID: exp.VersionID,
|
||||
},
|
||||
}
|
||||
toDel = append(toDel, od)
|
||||
oiCache.Add(od, &exp.ObjectInfo)
|
||||
}
|
||||
|
||||
var done bool
|
||||
// DeleteObject(deletePrefix: true) to expire all versions of an object
|
||||
for _, exp := range toExpireAll {
|
||||
var success bool
|
||||
for attempts := 1; attempts <= retryAttempts; attempts++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
done = true
|
||||
default:
|
||||
}
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
|
||||
_, err := api.DeleteObject(ctx, exp.Bucket, exp.Name, ObjectOptions{
|
||||
DeletePrefix: true,
|
||||
})
|
||||
if err != nil {
|
||||
stopFn(exp, err)
|
||||
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", exp.Bucket, exp.Name, exp.VersionID, err, attempts))
|
||||
} else {
|
||||
stopFn(exp, err)
|
||||
success = true
|
||||
break
|
||||
}
|
||||
}
|
||||
ri.trackMultipleObjectVersions(r.Bucket, exp, success)
|
||||
if done {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteMultiple objects
|
||||
toDelCopy := make([]ObjectToDelete, len(toDel))
|
||||
for attempts := 1; attempts <= retryAttempts; attempts++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
|
||||
// Copying toDel to select from objects whose
|
||||
// deletion failed
|
||||
copy(toDelCopy, toDel)
|
||||
var failed int
|
||||
errs := r.Expire(ctx, api, vc, toDel)
|
||||
// reslice toDel in preparation for next retry
|
||||
// attempt
|
||||
toDel = toDel[:0]
|
||||
for i, err := range errs {
|
||||
if err != nil {
|
||||
stopFn(toDelCopy[i], err)
|
||||
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
|
||||
failed++
|
||||
if attempts == retryAttempts { // all retry attempts failed, record failure
|
||||
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
|
||||
ri.trackCurrentBucketObject(r.Bucket, *oi, false)
|
||||
}
|
||||
} else {
|
||||
toDel = append(toDel, toDelCopy[i])
|
||||
}
|
||||
} else {
|
||||
stopFn(toDelCopy[i], nil)
|
||||
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
|
||||
ri.trackCurrentBucketObject(r.Bucket, *oi, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
globalBatchJobsMetrics.save(ri.JobID, ri)
|
||||
|
||||
if failed == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Add a delay between retry attempts
|
||||
if attempts < retryAttempts {
|
||||
time.Sleep(delay)
|
||||
}
|
||||
}
|
||||
}(toExpire)
|
||||
}
|
||||
}
|
||||
|
||||
type expireObjInfo struct {
|
||||
ObjectInfo
|
||||
ExpireAll bool
|
||||
}
|
||||
|
||||
// Start the batch expiration job, resumes if there was a pending job via "job.ID"
|
||||
func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
|
||||
ri := &batchJobInfo{
|
||||
JobID: job.ID,
|
||||
JobType: string(job.Type()),
|
||||
StartTime: job.Started,
|
||||
}
|
||||
if err := ri.load(ctx, api, job); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
lastObject := ri.Object
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_EXPIRATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wk, err := workers.New(workerSize)
|
||||
if err != nil {
|
||||
// invalid worker size.
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
results := make(chan ObjectInfo, workerSize)
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
|
||||
Marker: lastObject,
|
||||
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
|
||||
VersionsSort: WalkVersionsSortDesc,
|
||||
}); err != nil {
|
||||
// Do not need to retry if we can't list objects on source.
|
||||
return err
|
||||
}
|
||||
|
||||
// Goroutine to periodically save batch-expire job's in-memory state
|
||||
saverQuitCh := make(chan struct{})
|
||||
go func() {
|
||||
saveTicker := time.NewTicker(10 * time.Second)
|
||||
defer saveTicker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-saveTicker.C:
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
|
||||
case <-ctx.Done():
|
||||
// persist in-memory state immediately before exiting due to context cancelation.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
return
|
||||
|
||||
case <-saverQuitCh:
|
||||
// persist in-memory state immediately to disk.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
expireCh := make(chan []expireObjInfo, workerSize)
|
||||
go batchObjsForDelete(ctx, r, ri, job, api, wk, expireCh)
|
||||
|
||||
var (
|
||||
prevObj ObjectInfo
|
||||
matchedFilter BatchJobExpireFilter
|
||||
versionsCount int
|
||||
toDel []expireObjInfo
|
||||
)
|
||||
for result := range results {
|
||||
// Apply filter to find the matching rule to apply expiry
|
||||
// actions accordingly.
|
||||
// nolint:gocritic
|
||||
if result.IsLatest {
|
||||
// send down filtered entries to be deleted using
|
||||
// DeleteObjects method
|
||||
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
|
||||
xfer := make([]expireObjInfo, len(toDel))
|
||||
copy(xfer, toDel)
|
||||
|
||||
var done bool
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
done = true
|
||||
case expireCh <- xfer:
|
||||
toDel = toDel[:0] // resetting toDel
|
||||
}
|
||||
if done {
|
||||
break
|
||||
}
|
||||
}
|
||||
var match BatchJobExpireFilter
|
||||
var found bool
|
||||
for _, rule := range r.Rules {
|
||||
if rule.Matches(result, now) {
|
||||
match = rule
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
prevObj = result
|
||||
matchedFilter = match
|
||||
versionsCount = 1
|
||||
// Include the latest version
|
||||
if matchedFilter.Purge.RetainVersions == 0 {
|
||||
toDel = append(toDel, expireObjInfo{
|
||||
ObjectInfo: result,
|
||||
ExpireAll: true,
|
||||
})
|
||||
continue
|
||||
}
|
||||
} else if prevObj.Name == result.Name {
|
||||
if matchedFilter.Purge.RetainVersions == 0 {
|
||||
continue // including latest version in toDel suffices, skipping other versions
|
||||
}
|
||||
versionsCount++
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
|
||||
if versionsCount <= matchedFilter.Purge.RetainVersions {
|
||||
continue // retain versions
|
||||
}
|
||||
toDel = append(toDel, expireObjInfo{
|
||||
ObjectInfo: result,
|
||||
})
|
||||
}
|
||||
// Send any remaining objects downstream
|
||||
if len(toDel) > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case expireCh <- toDel:
|
||||
}
|
||||
}
|
||||
close(expireCh)
|
||||
|
||||
wk.Wait() // waits for all expire goroutines to complete
|
||||
|
||||
ri.Complete = ri.ObjectsFailed == 0
|
||||
ri.Failed = ri.ObjectsFailed > 0
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
|
||||
// Close the saverQuitCh - this also triggers saving in-memory state
|
||||
// immediately one last time before we exit this method.
|
||||
close(saverQuitCh)
|
||||
|
||||
// Notify the expire job's final status to the configured endpoint
|
||||
buf, _ := json.Marshal(ri)
|
||||
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//msgp:ignore batchExpireJobError
|
||||
type batchExpireJobError struct {
|
||||
Code string
|
||||
Description string
|
||||
HTTPStatusCode int
|
||||
}
|
||||
|
||||
func (e batchExpireJobError) Error() string {
|
||||
return e.Description
|
||||
}
|
||||
|
||||
// maxBatchRules maximum number of rules a batch-expiry job supports
|
||||
const maxBatchRules = 50
|
||||
|
||||
// Validate validates the job definition input
|
||||
func (r *BatchJobExpire) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.APIVersion != batchExpireAPIVersion {
|
||||
return batchExpireJobError{
|
||||
Code: "InvalidArgument",
|
||||
Description: "Unsupported batch expire API version",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if r.Bucket == "" {
|
||||
return batchExpireJobError{
|
||||
Code: "InvalidArgument",
|
||||
Description: "Bucket argument missing",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {
|
||||
if isErrBucketNotFound(err) {
|
||||
return batchExpireJobError{
|
||||
Code: "NoSuchSourceBucket",
|
||||
Description: "The specified source bucket does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if len(r.Rules) > maxBatchRules {
|
||||
return batchExpireJobError{
|
||||
Code: "InvalidArgument",
|
||||
Description: "Too many rules. Batch expire job can't have more than 100 rules",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
for _, rule := range r.Rules {
|
||||
if err := rule.Validate(); err != nil {
|
||||
return batchExpireJobError{
|
||||
Code: "InvalidArgument",
|
||||
Description: fmt.Sprintf("Invalid batch expire rule: %s", err),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.Retry.Validate(); err != nil {
|
||||
return batchExpireJobError{
|
||||
Code: "InvalidArgument",
|
||||
Description: fmt.Sprintf("Invalid batch expire retry configuration: %s", err),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
cmd/batch-expire_gen.go (new file, 856 lines)
@@ -0,0 +1,856 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "APIVersion":
|
||||
z.APIVersion, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "APIVersion")
|
||||
return
|
||||
}
|
||||
case "Bucket":
|
||||
z.Bucket, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Bucket")
|
||||
return
|
||||
}
|
||||
case "Prefix":
|
||||
z.Prefix, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
case "NotificationCfg":
|
||||
err = z.NotificationCfg.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationCfg")
|
||||
return
|
||||
}
|
||||
case "Retry":
|
||||
err = z.Retry.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Retry")
|
||||
return
|
||||
}
|
||||
case "Rules":
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules")
|
||||
return
|
||||
}
|
||||
if cap(z.Rules) >= int(zb0002) {
|
||||
z.Rules = (z.Rules)[:zb0002]
|
||||
} else {
|
||||
z.Rules = make([]BatchJobExpireFilter, zb0002)
|
||||
}
|
||||
for za0001 := range z.Rules {
|
||||
err = z.Rules[za0001].DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// write "APIVersion"
|
||||
err = en.Append(0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.APIVersion)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "APIVersion")
|
||||
return
|
||||
}
|
||||
// write "Bucket"
|
||||
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Bucket)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Bucket")
|
||||
return
|
||||
}
|
||||
// write "Prefix"
|
||||
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Prefix)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
// write "NotificationCfg"
|
||||
err = en.Append(0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = z.NotificationCfg.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationCfg")
|
||||
return
|
||||
}
|
||||
// write "Retry"
|
||||
err = en.Append(0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = z.Retry.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Retry")
|
||||
return
|
||||
}
|
||||
// write "Rules"
|
||||
err = en.Append(0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(len(z.Rules)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules")
|
||||
return
|
||||
}
|
||||
for za0001 := range z.Rules {
|
||||
err = z.Rules[za0001].EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// string "APIVersion"
|
||||
o = append(o, 0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
o = msgp.AppendString(o, z.APIVersion)
|
||||
// string "Bucket"
|
||||
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
|
||||
o = msgp.AppendString(o, z.Bucket)
|
||||
// string "Prefix"
|
||||
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
|
||||
o = msgp.AppendString(o, z.Prefix)
|
||||
// string "NotificationCfg"
|
||||
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
|
||||
o, err = z.NotificationCfg.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationCfg")
|
||||
return
|
||||
}
|
||||
// string "Retry"
|
||||
o = append(o, 0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
|
||||
o, err = z.Retry.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Retry")
|
||||
return
|
||||
}
|
||||
// string "Rules"
|
||||
o = append(o, 0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
|
||||
o = msgp.AppendArrayHeader(o, uint32(len(z.Rules)))
|
||||
for za0001 := range z.Rules {
|
||||
o, err = z.Rules[za0001].MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "APIVersion":
|
||||
z.APIVersion, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "APIVersion")
|
||||
return
|
||||
}
|
||||
case "Bucket":
|
||||
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Bucket")
|
||||
return
|
||||
}
|
||||
case "Prefix":
|
||||
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
case "NotificationCfg":
|
||||
bts, err = z.NotificationCfg.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationCfg")
|
||||
return
|
||||
}
|
||||
case "Retry":
|
||||
bts, err = z.Retry.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Retry")
|
||||
return
|
||||
}
|
||||
case "Rules":
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules")
|
||||
return
|
||||
}
|
||||
if cap(z.Rules) >= int(zb0002) {
|
||||
z.Rules = (z.Rules)[:zb0002]
|
||||
} else {
|
||||
z.Rules = make([]BatchJobExpireFilter, zb0002)
|
||||
}
|
||||
for za0001 := range z.Rules {
|
||||
bts, err = z.Rules[za0001].UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Rules", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobExpire) Msgsize() (s int) {
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
|
||||
for za0001 := range z.Rules {
|
||||
s += z.Rules[za0001].Msgsize()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "OlderThan":
|
||||
z.OlderThan, err = dc.ReadDuration()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
}
|
||||
case "CreatedBefore":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CreatedBefore")
|
||||
return
|
||||
}
|
||||
z.CreatedBefore = nil
|
||||
} else {
|
||||
if z.CreatedBefore == nil {
|
||||
z.CreatedBefore = new(time.Time)
|
||||
}
|
||||
*z.CreatedBefore, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CreatedBefore")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Tags":
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags")
|
||||
return
|
||||
}
|
||||
if cap(z.Tags) >= int(zb0002) {
|
||||
z.Tags = (z.Tags)[:zb0002]
|
||||
} else {
|
||||
z.Tags = make([]BatchJobKV, zb0002)
|
||||
}
|
||||
for za0001 := range z.Tags {
|
||||
err = z.Tags[za0001].DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Metadata":
|
||||
var zb0003 uint32
|
||||
zb0003, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata")
|
||||
return
|
||||
}
|
||||
if cap(z.Metadata) >= int(zb0003) {
|
||||
z.Metadata = (z.Metadata)[:zb0003]
|
||||
} else {
|
||||
z.Metadata = make([]BatchJobKV, zb0003)
|
||||
}
|
||||
for za0002 := range z.Metadata {
|
||||
err = z.Metadata[za0002].DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Size":
|
||||
err = z.Size.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
case "Type":
|
||||
z.Type, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Type")
|
||||
return
|
||||
}
|
||||
case "Name":
|
||||
z.Name, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Purge":
|
||||
var zb0004 uint32
|
||||
zb0004, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
for zb0004 > 0 {
|
||||
zb0004--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "RetainVersions":
|
||||
z.Purge.RetainVersions, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge", "RetainVersions")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 8
|
||||
// write "OlderThan"
|
||||
err = en.Append(0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteDuration(z.OlderThan)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
}
|
||||
// write "CreatedBefore"
|
||||
err = en.Append(0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.CreatedBefore == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteTime(*z.CreatedBefore)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CreatedBefore")
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Tags"
|
||||
err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(len(z.Tags)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags")
|
||||
return
|
||||
}
|
||||
for za0001 := range z.Tags {
|
||||
err = z.Tags[za0001].EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Metadata"
|
||||
err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(len(z.Metadata)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata")
|
||||
return
|
||||
}
|
||||
for za0002 := range z.Metadata {
|
||||
err = z.Metadata[za0002].EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Size"
|
||||
err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = z.Size.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
// write "Type"
|
||||
err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Type)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Type")
|
||||
return
|
||||
}
|
||||
// write "Name"
|
||||
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Name)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
// write "Purge"
|
||||
err = en.Append(0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// map header, size 1
|
||||
// write "RetainVersions"
|
||||
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.Purge.RetainVersions)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge", "RetainVersions")
|
||||
return
|
||||
}
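// Note: the generated encoder writes the nested Purge value inline above as its
// own one-entry fixmap; the bytes match what BatchJobExpirePurge.EncodeMsg
// (defined further below) would emit for the same value.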
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 8
|
||||
// string "OlderThan"
|
||||
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
o = msgp.AppendDuration(o, z.OlderThan)
|
||||
// string "CreatedBefore"
|
||||
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
|
||||
if z.CreatedBefore == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendTime(o, *z.CreatedBefore)
|
||||
}
|
||||
// string "Tags"
|
||||
o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73)
|
||||
o = msgp.AppendArrayHeader(o, uint32(len(z.Tags)))
|
||||
for za0001 := range z.Tags {
|
||||
o, err = z.Tags[za0001].MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
// string "Metadata"
|
||||
o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
|
||||
o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata)))
|
||||
for za0002 := range z.Metadata {
|
||||
o, err = z.Metadata[za0002].MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
// string "Size"
|
||||
o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
|
||||
o, err = z.Size.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
// string "Type"
|
||||
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
o = msgp.AppendString(o, z.Type)
|
||||
// string "Name"
|
||||
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
|
||||
o = msgp.AppendString(o, z.Name)
|
||||
// string "Purge"
|
||||
o = append(o, 0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
|
||||
// map header, size 1
|
||||
// string "RetainVersions"
|
||||
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
|
||||
o = msgp.AppendInt(o, z.Purge.RetainVersions)
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "OlderThan":
|
||||
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
}
|
||||
case "CreatedBefore":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.CreatedBefore = nil
|
||||
} else {
|
||||
if z.CreatedBefore == nil {
|
||||
z.CreatedBefore = new(time.Time)
|
||||
}
|
||||
*z.CreatedBefore, bts, err = msgp.ReadTimeBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CreatedBefore")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Tags":
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags")
|
||||
return
|
||||
}
|
||||
if cap(z.Tags) >= int(zb0002) {
|
||||
z.Tags = (z.Tags)[:zb0002]
|
||||
} else {
|
||||
z.Tags = make([]BatchJobKV, zb0002)
|
||||
}
|
||||
for za0001 := range z.Tags {
|
||||
bts, err = z.Tags[za0001].UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Metadata":
|
||||
var zb0003 uint32
|
||||
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata")
|
||||
return
|
||||
}
|
||||
if cap(z.Metadata) >= int(zb0003) {
|
||||
z.Metadata = (z.Metadata)[:zb0003]
|
||||
} else {
|
||||
z.Metadata = make([]BatchJobKV, zb0003)
|
||||
}
|
||||
for za0002 := range z.Metadata {
|
||||
bts, err = z.Metadata[za0002].UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Size":
|
||||
bts, err = z.Size.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
case "Type":
|
||||
z.Type, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Type")
|
||||
return
|
||||
}
|
||||
case "Name":
|
||||
z.Name, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Purge":
|
||||
var zb0004 uint32
|
||||
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
for zb0004 > 0 {
|
||||
zb0004--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "RetainVersions":
|
||||
z.Purge.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge", "RetainVersions")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobExpireFilter) Msgsize() (s int) {
|
||||
s = 1 + 10 + msgp.DurationSize + 14
|
||||
if z.CreatedBefore == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.TimeSize
|
||||
}
|
||||
s += 5 + msgp.ArrayHeaderSize
|
||||
for za0001 := range z.Tags {
|
||||
s += z.Tags[za0001].Msgsize()
|
||||
}
|
||||
s += 9 + msgp.ArrayHeaderSize
|
||||
for za0002 := range z.Metadata {
|
||||
s += z.Metadata[za0002].Msgsize()
|
||||
}
|
||||
s += 5 + z.Size.Msgsize() + 5 + msgp.StringPrefixSize + len(z.Type) + 5 + msgp.StringPrefixSize + len(z.Name) + 6 + 1 + 15 + msgp.IntSize
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BatchJobExpirePurge) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "RetainVersions":
|
||||
z.RetainVersions, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "RetainVersions")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z BatchJobExpirePurge) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 1
|
||||
// write "RetainVersions"
|
||||
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.RetainVersions)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "RetainVersions")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z BatchJobExpirePurge) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 1
|
||||
// string "RetainVersions"
|
||||
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
|
||||
o = msgp.AppendInt(o, z.RetainVersions)
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobExpirePurge) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "RetainVersions":
|
||||
z.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "RetainVersions")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobExpirePurge) Msgsize() (s int) {
	s = 1 + 15 + msgp.IntSize
	return
}
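
Msgsize is a preallocation hint rather than an exact length: a caller can size the output buffer once and let MarshalMsg append into it, which is the pattern the generated benchmarks below also use. A minimal sketch of that usage (illustrative only, not part of the generated file; the variable names are assumptions):

	p := BatchJobExpirePurge{RetainVersions: 5}
	// 1-byte fixmap header + 15 bytes for the "RetainVersions" key + msgp.IntSize
	buf := make([]byte, 0, p.Msgsize())
	buf, err := p.MarshalMsg(buf)
	if err != nil {
		// handle the marshal error
	}
	var q BatchJobExpirePurge
	if _, err = q.UnmarshalMsg(buf); err != nil {
		// handle the unmarshal error
	}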

349
cmd/batch-expire_gen_test.go
Normal file
@@ -0,0 +1,349 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
func TestMarshalUnmarshalBatchJobExpire(t *testing.T) {
|
||||
v := BatchJobExpire{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgBatchJobExpire(b *testing.B) {
|
||||
v := BatchJobExpire{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgBatchJobExpire(b *testing.B) {
|
||||
v := BatchJobExpire{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBatchJobExpire(b *testing.B) {
|
||||
v := BatchJobExpire{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeBatchJobExpire(t *testing.T) {
|
||||
v := BatchJobExpire{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBatchJobExpire Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BatchJobExpire{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBatchJobExpire(b *testing.B) {
|
||||
v := BatchJobExpire{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBatchJobExpire(b *testing.B) {
|
||||
v := BatchJobExpire{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalBatchJobExpireFilter(t *testing.T) {
|
||||
v := BatchJobExpireFilter{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgBatchJobExpireFilter(b *testing.B) {
|
||||
v := BatchJobExpireFilter{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgBatchJobExpireFilter(b *testing.B) {
|
||||
v := BatchJobExpireFilter{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBatchJobExpireFilter(b *testing.B) {
|
||||
v := BatchJobExpireFilter{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeBatchJobExpireFilter(t *testing.T) {
|
||||
v := BatchJobExpireFilter{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBatchJobExpireFilter Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BatchJobExpireFilter{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBatchJobExpireFilter(b *testing.B) {
|
||||
v := BatchJobExpireFilter{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBatchJobExpireFilter(b *testing.B) {
|
||||
v := BatchJobExpireFilter{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalBatchJobExpirePurge(t *testing.T) {
|
||||
v := BatchJobExpirePurge{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgBatchJobExpirePurge(b *testing.B) {
|
||||
v := BatchJobExpirePurge{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgBatchJobExpirePurge(b *testing.B) {
|
||||
v := BatchJobExpirePurge{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBatchJobExpirePurge(b *testing.B) {
|
||||
v := BatchJobExpirePurge{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeBatchJobExpirePurge(t *testing.T) {
|
||||
v := BatchJobExpirePurge{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBatchJobExpirePurge Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BatchJobExpirePurge{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBatchJobExpirePurge(b *testing.B) {
|
||||
v := BatchJobExpirePurge{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBatchJobExpirePurge(b *testing.B) {
|
||||
v := BatchJobExpirePurge{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}

71
cmd/batch-expire_test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright (c) 2015-2023 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestParseBatchJobExpire(t *testing.T) {
|
||||
expireYaml := `
|
||||
expire: # Expire objects that match a condition
|
||||
apiVersion: v1
|
||||
bucket: mybucket # Bucket where this batch job will expire matching objects from
|
||||
prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
|
||||
rules:
|
||||
- type: object # regular objects with zero or more older versions
|
||||
name: NAME # match object names that satisfy the wildcard expression.
|
||||
olderThan: 70h # match objects older than this value
|
||||
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
tags:
|
||||
- key: name
|
||||
value: pick* # match objects with tag 'name', all values starting with 'pick'
|
||||
metadata:
|
||||
- key: content-type
|
||||
value: image/* # match objects with 'content-type', all values starting with 'image/'
|
||||
size:
|
||||
lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
|
||||
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
|
||||
purge:
|
||||
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
|
||||
# retainVersions: 5 # keep the latest 5 versions of the object.
|
||||
|
||||
- type: deleted # objects with delete marker as their latest version
|
||||
name: NAME # match object names that satisfy the wildcard expression.
|
||||
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
|
||||
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
|
||||
purge:
|
||||
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
|
||||
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
|
||||
|
||||
notify:
|
||||
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
|
||||
token: Bearer xxxxx # optional authentication token for the notification endpoint
|
||||
|
||||
retry:
|
||||
attempts: 10 # number of retries for the job before giving up
|
||||
delay: 500ms # least amount of delay between each retry
|
||||
`
|
||||
var job BatchJobRequest
|
||||
err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to parse batch-job-expire yaml", err)
|
||||
}
|
||||
}
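
For context (not part of the new test file), a sketch of how a job parsed from this YAML could be round-tripped through the generated MessagePack code shown earlier; the names mirror the test above, it is assumed to run inside a *testing.T test, and error handling is collapsed into t.Fatal:

	var job BatchJobRequest
	if err := yaml.UnmarshalStrict([]byte(expireYaml), &job); err != nil {
		t.Fatal(err)
	}
	buf, err := job.Expire.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	var decoded BatchJobExpire
	if _, err := decoded.UnmarshalMsg(buf); err != nil {
		t.Fatal(err)
	}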
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -38,10 +37,12 @@ import (
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/lithammer/shortuuid/v4"
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio-go/v7"
|
||||
miniogo "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/internal/config/batch"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/hash"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
@@ -49,11 +50,13 @@ import (
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/console"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
iampolicy "github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var globalBatchConfig batch.Config
|
||||
|
||||
// BatchJobRequest this is an internal data structure not for external consumption.
|
||||
type BatchJobRequest struct {
|
||||
ID string `yaml:"-" json:"name"`
|
||||
@@ -62,6 +65,7 @@ type BatchJobRequest struct {
|
||||
Location string `yaml:"-" json:"location"`
|
||||
Replicate *BatchJobReplicateV1 `yaml:"replicate" json:"replicate"`
|
||||
KeyRotate *BatchJobKeyRotateV1 `yaml:"keyrotate" json:"keyrotate"`
|
||||
Expire *BatchJobExpire `yaml:"expire" json:"expire"`
|
||||
ctx context.Context `msg:"-"`
|
||||
}
|
||||
|
||||
@@ -114,7 +118,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
|
||||
srcObject := srcObjInfo.Name
|
||||
tgtObject := srcObjInfo.Name
|
||||
if r.Target.Prefix != "" {
|
||||
tgtObject = path.Join(r.Target.Prefix, srcObjInfo.Name)
|
||||
tgtObject = pathJoin(r.Target.Prefix, srcObjInfo.Name)
|
||||
}
|
||||
|
||||
versionID := srcObjInfo.VersionID
|
||||
@@ -163,11 +167,11 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
|
||||
}
|
||||
rd, objInfo, _, err := core.GetObject(ctx, srcBucket, srcObject, gopts)
|
||||
if err != nil {
|
||||
return err
|
||||
return ErrorRespToObjectError(err, srcBucket, srcObject, srcObjInfo.VersionID)
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
hr, err := hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size)
|
||||
hr, err := hash.NewReader(ctx, rd, objInfo.Size, "", "", objInfo.Size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -182,7 +186,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
|
||||
srcObject := srcObjInfo.Name
|
||||
tgtObject := srcObjInfo.Name
|
||||
if r.Target.Prefix != "" {
|
||||
tgtObject = path.Join(r.Target.Prefix, srcObjInfo.Name)
|
||||
tgtObject = pathJoin(r.Target.Prefix, srcObjInfo.Name)
|
||||
}
|
||||
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
|
||||
opts.VersionID = ""
|
||||
@@ -226,11 +230,11 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
|
||||
}
|
||||
rd, objInfo, _, err := c.GetObject(ctx, srcBucket, srcObject, gopts)
|
||||
if err != nil {
|
||||
return err
|
||||
return ErrorRespToObjectError(err, srcBucket, srcObject, srcObjInfo.VersionID)
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
hr, err = hash.NewReader(io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size)
|
||||
hr, err = hash.NewReader(ctx, io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -262,6 +266,9 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
if err := ri.load(ctx, api, job); err != nil {
|
||||
return err
|
||||
}
|
||||
if ri.Complete {
|
||||
return nil
|
||||
}
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
|
||||
delay := job.Replicate.Flags.Retry.Delay
|
||||
@@ -270,6 +277,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
}
|
||||
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
isTags := len(r.Flags.Filter.Tags) != 0
|
||||
isMetadata := len(r.Flags.Filter.Metadata) != 0
|
||||
isStorageClassOnly := len(r.Flags.Filter.Metadata) == 1 && strings.EqualFold(r.Flags.Filter.Metadata[0].Key, xhttp.AmzStorageClass)
|
||||
|
||||
skip := func(oi ObjectInfo) (ok bool) {
|
||||
if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan {
|
||||
// skip all objects that are newer than specified older duration
|
||||
@@ -290,7 +301,8 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
// skip all objects that are created after the specified time.
|
||||
return true
|
||||
}
|
||||
if len(r.Flags.Filter.Tags) > 0 {
|
||||
|
||||
if isTags {
|
||||
// Only parse object tags if tags filter is specified.
|
||||
tagMap := map[string]string{}
|
||||
tagStr := oi.UserTags
|
||||
@@ -313,23 +325,19 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
return false
|
||||
}
|
||||
|
||||
if len(r.Flags.Filter.Metadata) > 0 {
|
||||
for _, kv := range r.Flags.Filter.Metadata {
|
||||
for k, v := range oi.UserDefined {
|
||||
if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
|
||||
continue
|
||||
}
|
||||
// We only need to match x-amz-meta or standardHeaders
|
||||
if kv.Match(BatchJobKV{Key: k, Value: v}) {
|
||||
return true
|
||||
}
|
||||
for _, kv := range r.Flags.Filter.Metadata {
|
||||
for k, v := range oi.UserDefined {
|
||||
if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
|
||||
continue
|
||||
}
|
||||
// We only need to match x-amz-meta or standardHeaders
|
||||
if kv.Match(BatchJobKV{Key: k, Value: v}) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// None of the provided metadata filters match skip the object.
|
||||
return false
|
||||
}
|
||||
|
||||
// None of the provided filters match
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -384,17 +392,32 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
for obj := range objInfoCh {
|
||||
oi := toObjectInfo(r.Source.Bucket, obj.Key, obj)
|
||||
if !minioSrc {
|
||||
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
|
||||
if err == nil {
|
||||
oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
|
||||
} else {
|
||||
if isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) ||
|
||||
isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
|
||||
// Check if metadata filter was requested and it is expected to have
|
||||
// all user metadata or just storageClass. If its only storageClass
|
||||
// List() already returns relevant information for filter to be applied.
|
||||
if isMetadata && !isStorageClassOnly {
|
||||
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
|
||||
if err == nil {
|
||||
oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
|
||||
} else {
|
||||
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
|
||||
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if isTags {
|
||||
tags, err := c.GetObjectTagging(ctx, r.Source.Bucket, obj.Key, minio.GetObjectTaggingOptions{})
|
||||
if err == nil {
|
||||
oi.UserTags = tags.String()
|
||||
} else {
|
||||
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
|
||||
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
}
|
||||
if skip(oi) {
|
||||
@@ -412,23 +435,27 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
wk.Take()
|
||||
go func() {
|
||||
defer wk.Give()
|
||||
stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, oi)
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts)
|
||||
success := true
|
||||
if err := r.ReplicateFromSource(ctx, api, core, oi, retry); err != nil {
|
||||
// object must be deleted concurrently, allow these failures but do not count them
|
||||
if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
|
||||
return
|
||||
}
|
||||
stopFn(err)
|
||||
stopFn(oi, err)
|
||||
logger.LogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(nil)
|
||||
stopFn(oi, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
|
||||
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wk.Wait()
|
||||
@@ -482,10 +509,12 @@ func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo
|
||||
ReplicationStatusInternal: objInfo.ReplicationStatus,
|
||||
UserTags: tags.String(),
|
||||
}
|
||||
|
||||
oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
|
||||
for k, v := range objInfo.Metadata {
|
||||
oi.UserDefined[k] = v[0]
|
||||
}
|
||||
|
||||
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
|
||||
if !ok {
|
||||
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
|
||||
@@ -493,9 +522,74 @@ func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo
|
||||
if ok {
|
||||
oi.ContentEncoding = ce
|
||||
}
|
||||
|
||||
_, ok = oi.UserDefined[xhttp.AmzStorageClass]
|
||||
if !ok {
|
||||
oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
|
||||
}
|
||||
|
||||
for k, v := range objInfo.UserMetadata {
|
||||
oi.UserDefined[k] = v
|
||||
}
|
||||
|
||||
return oi
|
||||
}
|
||||
|
||||
func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo) error {
|
||||
input := make(chan minio.SnowballObject, 1)
|
||||
opts := minio.SnowballOptions{
|
||||
Opts: minio.PutObjectOptions{},
|
||||
InMemory: *r.Source.Snowball.InMemory,
|
||||
Compress: *r.Source.Snowball.Compress,
|
||||
SkipErrs: *r.Source.Snowball.SkipErrs,
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(input)
|
||||
|
||||
for _, entry := range entries {
|
||||
gr, err := objAPI.GetObjectNInfo(ctx, r.Source.Bucket,
|
||||
entry.Name, nil, nil, ObjectOptions{
|
||||
VersionID: entry.VersionID,
|
||||
})
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
snowballObj := minio.SnowballObject{
|
||||
// Create path to store objects within the bucket.
|
||||
Key: entry.Name,
|
||||
Size: entry.Size,
|
||||
ModTime: entry.ModTime,
|
||||
VersionID: entry.VersionID,
|
||||
Content: gr,
|
||||
Headers: make(http.Header),
|
||||
Close: func() {
|
||||
gr.Close()
|
||||
},
|
||||
}
|
||||
|
||||
opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for k, vals := range opts.Header() {
|
||||
for _, v := range vals {
|
||||
snowballObj.Headers.Add(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
input <- snowballObj
|
||||
}
|
||||
}()
|
||||
|
||||
// Collect and upload all entries.
|
||||
return remoteClnt.PutObjectsSnowball(ctx, r.Target.Bucket, opts, input)
|
||||
}
|
||||
|
||||
// ReplicateToTarget read from source and replicate to configured target
|
||||
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
|
||||
srcBucket := r.Source.Bucket
|
||||
@@ -639,7 +733,12 @@ func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobR
|
||||
fileName = batchKeyRotationName
|
||||
version = batchKeyRotateVersionV1
|
||||
format = batchKeyRotationFormat
|
||||
|
||||
case job.Expire != nil:
|
||||
fileName = batchExpireName
|
||||
version = batchExpireVersionV1
|
||||
format = batchExpireFormat
|
||||
default:
|
||||
return errors.New("no supported batch job request specified")
|
||||
}
|
||||
data, err := readConfig(ctx, api, pathJoin(job.Location, fileName))
|
||||
if err != nil {
|
||||
@@ -656,6 +755,11 @@ func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobR
|
||||
if job.KeyRotate.Flags.Retry.Attempts > 0 {
|
||||
ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts
|
||||
}
|
||||
case job.Expire != nil:
|
||||
ri.RetryAttempts = batchExpireJobDefaultRetries
|
||||
if job.Expire.Retry.Attempts > 0 {
|
||||
ri.RetryAttempts = job.Expire.Retry.Attempts
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -765,6 +869,12 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati
|
||||
jobTyp = string(job.Type())
|
||||
fileName = batchKeyRotationName
|
||||
ri.Version = batchKeyRotateVersionV1
|
||||
case madmin.BatchJobExpire:
|
||||
format = batchExpireFormat
|
||||
version = batchExpireVersion
|
||||
jobTyp = string(job.Type())
|
||||
fileName = batchExpireName
|
||||
ri.Version = batchExpireVersionV1
|
||||
default:
|
||||
return errInvalidArgument
|
||||
}
|
||||
@@ -790,7 +900,18 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, failed bool) {
|
||||
// Note: to be used only with batch jobs that affect multiple versions through
|
||||
// a single action. e.g batch-expire has an option to expire all versions of an
|
||||
// object which matches the given filters.
|
||||
func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectInfo, success bool) {
|
||||
if success {
|
||||
ri.Objects += int64(info.NumVersions)
|
||||
} else {
|
||||
ri.ObjectsFailed += int64(info.NumVersions)
|
||||
}
|
||||
}
|
||||
|
||||
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) {
|
||||
if ri == nil {
|
||||
return
|
||||
}
|
||||
@@ -800,7 +921,7 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,
|
||||
|
||||
ri.Bucket = bucket
|
||||
ri.Object = info.Name
|
||||
ri.countItem(info.Size, info.DeleteMarker, failed)
|
||||
ri.countItem(info.Size, info.DeleteMarker, success)
|
||||
}
|
||||
|
||||
// Start start the batch replication job, resumes if there was a pending job via "job.ID"
|
||||
@@ -813,6 +934,9 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
if err := ri.load(ctx, api, job); err != nil {
|
||||
return err
|
||||
}
|
||||
if ri.Complete {
|
||||
return nil
|
||||
}
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
lastObject := ri.Object
|
||||
|
||||
@@ -822,7 +946,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
}
|
||||
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
skip := func(info FileInfo) (ok bool) {
|
||||
selectObj := func(info FileInfo) (ok bool) {
|
||||
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
|
||||
// skip all objects that are newer than specified older duration
|
||||
return false
|
||||
@@ -907,8 +1031,73 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
|
||||
|
||||
var (
|
||||
walkCh = make(chan ObjectInfo, 100)
|
||||
slowCh = make(chan ObjectInfo, 100)
|
||||
)
|
||||
|
||||
if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
|
||||
go func() {
|
||||
defer close(slowCh)
|
||||
|
||||
// Snowball currently needs the high level minio-go Client, not the Core one
|
||||
cl, err := miniogo.New(u.Host, &miniogo.Options{
|
||||
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
|
||||
Secure: u.Scheme == "https",
|
||||
Transport: getRemoteInstanceTransport,
|
||||
BucketLookup: lookupStyle(r.Target.Path),
|
||||
})
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Already validated before arriving here
|
||||
smallerThan, _ := humanize.ParseBytes(*r.Source.Snowball.SmallerThan)
|
||||
|
||||
var (
|
||||
obj = ObjectInfo{}
|
||||
batch = make([]ObjectInfo, 0, *r.Source.Snowball.Batch)
|
||||
valid = true
|
||||
)
|
||||
|
||||
for valid {
|
||||
obj, valid = <-walkCh
|
||||
|
||||
if !valid {
|
||||
goto write
|
||||
}
|
||||
|
||||
if obj.DeleteMarker || !obj.VersionPurgeStatus.Empty() || obj.Size >= int64(smallerThan) {
|
||||
slowCh <- obj
|
||||
continue
|
||||
}
|
||||
|
||||
batch = append(batch, obj)
|
||||
|
||||
if len(batch) < *r.Source.Snowball.Batch {
|
||||
continue
|
||||
}
|
||||
|
||||
write:
|
||||
if len(batch) > 0 {
|
||||
if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
for _, b := range batch {
|
||||
slowCh <- b
|
||||
}
|
||||
}
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
slowCh = walkCh
|
||||
}
|
||||
|
||||
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -920,6 +1109,11 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
return err
|
||||
}
|
||||
|
||||
walkQuorum := env.Get("_MINIO_BATCH_REPLICATION_WALK_QUORUM", "strict")
|
||||
if walkQuorum == "" {
|
||||
walkQuorum = "strict"
|
||||
}
|
||||
|
||||
retryAttempts := ri.RetryAttempts
|
||||
retry := false
|
||||
for attempts := 1; attempts <= retryAttempts; attempts++ {
|
||||
@@ -930,9 +1124,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
|
||||
|
||||
results := make(chan ObjectInfo, 100)
|
||||
if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, ObjectOptions{
|
||||
WalkMarker: lastObject,
|
||||
WalkFilter: skip,
|
||||
if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, WalkOptions{
|
||||
Marker: lastObject,
|
||||
Filter: selectObj,
|
||||
AskDisks: walkQuorum,
|
||||
}); err != nil {
|
||||
cancel()
|
||||
// Do not need to retry if we can't list objects on source.
|
||||
@@ -942,7 +1137,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
prevObj := ""
|
||||
|
||||
skipReplicate := false
|
||||
for result := range results {
|
||||
for result := range slowCh {
|
||||
result := result
|
||||
if result.Name != prevObj {
|
||||
prevObj = result.Name
|
||||
@@ -955,7 +1150,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
go func() {
|
||||
defer wk.Give()
|
||||
|
||||
stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, result)
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts)
|
||||
success := true
|
||||
if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {
|
||||
if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" {
|
||||
@@ -966,16 +1161,20 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
|
||||
return
|
||||
}
|
||||
stopFn(err)
|
||||
stopFn(result, err)
|
||||
logger.LogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(nil)
|
||||
stopFn(result, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
|
||||
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wk.Wait()
|
||||
@@ -1056,6 +1255,9 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
|
||||
if err := r.Source.Type.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := r.Source.Snowball.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
|
||||
return errInvalidArgument
|
||||
}
|
||||
@@ -1177,6 +1379,8 @@ func (j BatchJobRequest) Type() madmin.BatchJobType {
|
||||
return madmin.BatchJobReplicate
|
||||
case j.KeyRotate != nil:
|
||||
return madmin.BatchJobKeyRotate
|
||||
case j.Expire != nil:
|
||||
return madmin.BatchJobExpire
|
||||
}
|
||||
return madmin.BatchJobType("unknown")
|
||||
}
|
||||
@@ -1189,6 +1393,8 @@ func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {
|
||||
return j.Replicate.Validate(ctx, j, o)
|
||||
case j.KeyRotate != nil:
|
||||
return j.KeyRotate.Validate(ctx, j, o)
|
||||
case j.Expire != nil:
|
||||
return j.Expire.Validate(ctx, j, o)
|
||||
}
|
||||
return errInvalidArgument
|
||||
}
|
||||
@@ -1199,12 +1405,14 @@ func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) {
|
||||
deleteConfig(ctx, api, pathJoin(j.Location, batchReplName))
|
||||
case j.KeyRotate != nil:
|
||||
deleteConfig(ctx, api, pathJoin(j.Location, batchKeyRotationName))
|
||||
case j.Expire != nil:
|
||||
deleteConfig(ctx, api, pathJoin(j.Location, batchExpireName))
|
||||
}
|
||||
deleteConfig(ctx, api, j.Location)
|
||||
}
|
||||
|
||||
func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error {
|
||||
if j.Replicate == nil && j.KeyRotate == nil {
|
||||
if j.Replicate == nil && j.KeyRotate == nil && j.Expire == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
@@ -1258,7 +1466,7 @@ func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (p
|
||||
func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListBatchJobsAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListBatchJobsAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1273,7 +1481,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobPrefix, resultCh, ObjectOptions{}); err != nil {
|
||||
if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobPrefix, resultCh, WalkOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -1308,7 +1516,7 @@ var errNoSuchJob = errors.New("no such job")
|
||||
func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DescribeBatchJobAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DescribeBatchJobAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1343,7 +1551,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
|
||||
func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, creds := validateAdminReq(ctx, w, r, iampolicy.StartBatchJobAction)
|
||||
objectAPI, creds := validateAdminReq(ctx, w, r, policy.StartBatchJobAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1365,6 +1573,34 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
// Fill with default values
|
||||
if job.Replicate != nil {
|
||||
if job.Replicate.Source.Snowball.Disable == nil {
|
||||
job.Replicate.Source.Snowball.Disable = ptr(false)
|
||||
}
|
||||
if job.Replicate.Source.Snowball.Batch == nil {
|
||||
job.Replicate.Source.Snowball.Batch = ptr(100)
|
||||
}
|
||||
if job.Replicate.Source.Snowball.InMemory == nil {
|
||||
job.Replicate.Source.Snowball.InMemory = ptr(true)
|
||||
}
|
||||
if job.Replicate.Source.Snowball.Compress == nil {
|
||||
job.Replicate.Source.Snowball.Compress = ptr(false)
|
||||
}
|
||||
if job.Replicate.Source.Snowball.SmallerThan == nil {
|
||||
job.Replicate.Source.Snowball.SmallerThan = ptr("5MiB")
|
||||
}
|
||||
if job.Replicate.Source.Snowball.SkipErrs == nil {
|
||||
job.Replicate.Source.Snowball.SkipErrs = ptr(true)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate the incoming job request
|
||||
if err := job.Validate(ctx, objectAPI); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
job.ID = fmt.Sprintf("%s:%d", shortuuid.New(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
|
||||
job.User = user
|
||||
job.Started = time.Now()
|
||||
@@ -1397,7 +1633,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
|
||||
func (a adminAPIHandlers) CancelBatchJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.CancelBatchJobAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.CancelBatchJobAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
@@ -1462,7 +1698,7 @@ func (j *BatchJobPool) resume() {
|
||||
results := make(chan ObjectInfo, 100)
|
||||
ctx, cancel := context.WithCancel(j.ctx)
|
||||
defer cancel()
|
||||
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, ObjectOptions{}); err != nil {
|
||||
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil {
|
||||
logger.LogIf(j.ctx, err)
|
||||
return
|
||||
}
|
||||
@@ -1501,7 +1737,8 @@ func (j *BatchJobPool) AddWorker() {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if job.Replicate != nil {
|
||||
switch {
|
||||
case job.Replicate != nil:
|
||||
if job.Replicate.RemoteToLocal() {
|
||||
if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
@@ -1521,14 +1758,20 @@ func (j *BatchJobPool) AddWorker() {
|
||||
// Bucket not found proceed to delete such a job.
|
||||
}
|
||||
}
|
||||
}
|
||||
if job.KeyRotate != nil {
|
||||
case job.KeyRotate != nil:
|
||||
if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
logger.LogIf(j.ctx, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
case job.Expire != nil:
|
||||
if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
logger.LogIf(j.ctx, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
job.delete(j.ctx, j.objLayer)
|
||||
j.canceler(job.ID, false)
|
||||
@@ -1606,24 +1849,26 @@ type batchJobMetrics struct {
type batchJobMetric uint8

const (
batchReplicationMetricObject batchJobMetric = iota
batchKeyRotationMetricObject
batchJobMetricReplication batchJobMetric = iota
batchJobMetricKeyRotation
batchJobMetricExpire
)

func batchJobTrace(d batchJobMetric, job string, startTime time.Time, duration time.Duration, info ObjectInfo, attempts int, err error) madmin.TraceInfo {
func batchJobTrace(d batchJobMetric, job string, startTime time.Time, duration time.Duration, info objTraceInfoer, attempts int, err error) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
jobKind := "batchReplication"
traceType := madmin.TraceBatchReplication
if d == batchKeyRotationMetricObject {
jobKind = "batchKeyRotation"
switch d {
case batchJobMetricKeyRotation:
traceType = madmin.TraceBatchKeyRotation
case batchJobMetricExpire:
traceType = madmin.TraceBatchExpire
}
funcName := fmt.Sprintf("%s.%s (job-name=%s)", jobKind, d.String(), job)
funcName := fmt.Sprintf("%s() (job-name=%s)", d.String(), job)
if attempts > 0 {
funcName = fmt.Sprintf("%s.%s (job-name=%s,attempts=%s)", jobKind, d.String(), job, humanize.Ordinal(attempts))
funcName = fmt.Sprintf("%s() (job-name=%s,attempts=%s)", d.String(), job, humanize.Ordinal(attempts))
}
return madmin.TraceInfo{
TraceType: traceType,
@@ -1631,55 +1876,65 @@ func batchJobTrace(d batchJobMetric, job string, startTime time.Time, duration t
NodeName: globalLocalNodeName,
FuncName: funcName,
Duration: duration,
Path: info.Name,
Path: fmt.Sprintf("%s (versionID=%s)", info.TraceObjName(), info.TraceVersionID()),
Error: errStr,
}
}
|
||||
|
||||
func (ri *batchJobInfo) metric() madmin.JobMetric {
|
||||
m := madmin.JobMetric{
|
||||
JobID: ri.JobID,
|
||||
JobType: ri.JobType,
|
||||
StartTime: ri.StartTime,
|
||||
LastUpdate: ri.LastUpdate,
|
||||
RetryAttempts: ri.RetryAttempts,
|
||||
Complete: ri.Complete,
|
||||
Failed: ri.Failed,
|
||||
}
|
||||
|
||||
switch ri.JobType {
|
||||
case string(madmin.BatchJobReplicate):
|
||||
m.Replicate = &madmin.ReplicateInfo{
|
||||
Bucket: ri.Bucket,
|
||||
Object: ri.Object,
|
||||
Objects: ri.Objects,
|
||||
ObjectsFailed: ri.ObjectsFailed,
|
||||
BytesTransferred: ri.BytesTransferred,
|
||||
BytesFailed: ri.BytesFailed,
|
||||
}
|
||||
case string(madmin.BatchJobKeyRotate):
|
||||
m.KeyRotate = &madmin.KeyRotationInfo{
|
||||
Bucket: ri.Bucket,
|
||||
Object: ri.Object,
|
||||
Objects: ri.Objects,
|
||||
ObjectsFailed: ri.ObjectsFailed,
|
||||
}
|
||||
case string(madmin.BatchJobExpire):
|
||||
m.Expired = &madmin.ExpirationInfo{
|
||||
Bucket: ri.Bucket,
|
||||
Object: ri.Object,
|
||||
Objects: ri.Objects,
|
||||
ObjectsFailed: ri.ObjectsFailed,
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *batchJobMetrics) report(jobID string) (metrics *madmin.BatchJobMetrics) {
|
||||
metrics = &madmin.BatchJobMetrics{CollectedAt: time.Now(), Jobs: make(map[string]madmin.JobMetric)}
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
match := true
|
||||
if jobID != "" {
|
||||
if job, ok := m.metrics[jobID]; ok {
|
||||
metrics.Jobs[jobID] = job.metric()
|
||||
}
|
||||
return metrics
|
||||
}
|
||||
|
||||
for id, job := range m.metrics {
|
||||
if jobID != "" {
|
||||
match = id == jobID
|
||||
}
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
|
||||
m := madmin.JobMetric{
|
||||
JobID: job.JobID,
|
||||
JobType: job.JobType,
|
||||
StartTime: job.StartTime,
|
||||
LastUpdate: job.LastUpdate,
|
||||
RetryAttempts: job.RetryAttempts,
|
||||
Complete: job.Complete,
|
||||
Failed: job.Failed,
|
||||
}
|
||||
|
||||
switch job.JobType {
|
||||
case string(madmin.BatchJobReplicate):
|
||||
m.Replicate = &madmin.ReplicateInfo{
|
||||
Bucket: job.Bucket,
|
||||
Object: job.Object,
|
||||
Objects: job.Objects,
|
||||
ObjectsFailed: job.ObjectsFailed,
|
||||
BytesTransferred: job.BytesTransferred,
|
||||
BytesFailed: job.BytesFailed,
|
||||
}
|
||||
case string(madmin.BatchJobKeyRotate):
|
||||
m.KeyRotate = &madmin.KeyRotationInfo{
|
||||
Bucket: job.Bucket,
|
||||
Object: job.Object,
|
||||
Objects: job.Objects,
|
||||
ObjectsFailed: job.ObjectsFailed,
|
||||
}
|
||||
}
|
||||
|
||||
metrics.Jobs[id] = m
|
||||
metrics.Jobs[id] = job.metric()
|
||||
}
|
||||
return metrics
|
||||
}
|
||||
@@ -1724,19 +1979,52 @@ func (m *batchJobMetrics) save(jobID string, ri *batchJobInfo) {
|
||||
m.metrics[jobID] = ri.clone()
|
||||
}
|
||||
|
||||
func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int, info ObjectInfo) func(err error) {
|
||||
type objTraceInfoer interface {
|
||||
TraceObjName() string
|
||||
TraceVersionID() string
|
||||
}
|
||||
|
||||
// TraceObjName returns name of object being traced
|
||||
func (td ObjectToDelete) TraceObjName() string {
|
||||
return td.ObjectName
|
||||
}
|
||||
|
||||
// TraceVersionID returns version-id of object being traced
|
||||
func (td ObjectToDelete) TraceVersionID() string {
|
||||
return td.VersionID
|
||||
}
|
||||
|
||||
// TraceObjName returns name of object being traced
|
||||
func (oi ObjectInfo) TraceObjName() string {
|
||||
return oi.Name
|
||||
}
|
||||
|
||||
// TraceVersionID returns version-id of object being traced
|
||||
func (oi ObjectInfo) TraceVersionID() string {
|
||||
return oi.VersionID
|
||||
}
|
||||
|
||||
func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int) func(info objTraceInfoer, err error) {
|
||||
startTime := time.Now()
|
||||
return func(err error) {
|
||||
return func(info objTraceInfoer, err error) {
|
||||
duration := time.Since(startTime)
|
||||
if globalTrace.NumSubscribers(madmin.TraceBatch) > 0 {
|
||||
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
|
||||
return
|
||||
}
|
||||
switch d {
|
||||
case batchReplicationMetricObject:
|
||||
case batchJobMetricReplication:
|
||||
if globalTrace.NumSubscribers(madmin.TraceBatchReplication) > 0 {
|
||||
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
|
||||
}
|
||||
case batchKeyRotationMetricObject:
|
||||
case batchJobMetricKeyRotation:
|
||||
if globalTrace.NumSubscribers(madmin.TraceBatchKeyRotation) > 0 {
|
||||
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
|
||||
}
|
||||
case batchJobMetricExpire:
|
||||
if globalTrace.NumSubscribers(madmin.TraceBatchExpire) > 0 {
|
||||
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
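To illustrate how the reworked trace hook is consumed, here is a minimal sketch (not part of the diff) of a per-object worker step; doWork is a hypothetical stand-in for the actual replicate/rotate/expire operation, and the snippet assumes it sits in the same cmd package as the code above:

// Sketch only: the closure is created once per attempt and later completed
// with the processed object and the outcome. ObjectInfo and ObjectToDelete
// both satisfy objTraceInfoer, so either can be handed to stopFn.
func traceOneObject(jobID string, attempts int, oi ObjectInfo, doWork func(ObjectInfo) error) error {
	stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, jobID, attempts)
	err := doWork(oi) // hypothetical per-object operation
	stopFn(oi, err)
	return err
}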
|
||||
|
||||
@@ -84,6 +84,24 @@ func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Expire":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Expire")
|
||||
return
|
||||
}
|
||||
z.Expire = nil
|
||||
} else {
|
||||
if z.Expire == nil {
|
||||
z.Expire = new(BatchJobExpire)
|
||||
}
|
||||
err = z.Expire.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Expire")
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
@@ -97,9 +115,9 @@ func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// map header, size 7
|
||||
// write "ID"
|
||||
err = en.Append(0x86, 0xa2, 0x49, 0x44)
|
||||
err = en.Append(0x87, 0xa2, 0x49, 0x44)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -172,15 +190,32 @@ func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Expire"
|
||||
err = en.Append(0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.Expire == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = z.Expire.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Expire")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobRequest) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// map header, size 7
|
||||
// string "ID"
|
||||
o = append(o, 0x86, 0xa2, 0x49, 0x44)
|
||||
o = append(o, 0x87, 0xa2, 0x49, 0x44)
|
||||
o = msgp.AppendString(o, z.ID)
|
||||
// string "User"
|
||||
o = append(o, 0xa4, 0x55, 0x73, 0x65, 0x72)
|
||||
@@ -213,6 +248,17 @@ func (z *BatchJobRequest) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
// string "Expire"
|
||||
o = append(o, 0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
|
||||
if z.Expire == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o, err = z.Expire.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Expire")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -292,6 +338,23 @@ func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Expire":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.Expire = nil
|
||||
} else {
|
||||
if z.Expire == nil {
|
||||
z.Expire = new(BatchJobExpire)
|
||||
}
|
||||
bts, err = z.Expire.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Expire")
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
@@ -318,6 +381,12 @@ func (z *BatchJobRequest) Msgsize() (s int) {
|
||||
} else {
|
||||
s += z.KeyRotate.Msgsize()
|
||||
}
|
||||
s += 7
|
||||
if z.Expire == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += z.Expire.Msgsize()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -18,9 +18,11 @@
package cmd

import (
"errors"
"strings"
"time"

"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
)

@@ -81,3 +83,81 @@ func (r BatchJobRetry) Validate() error {

return nil
}

// # snowball based archive transfer is by default enabled when source
// # is local and target is remote which is also minio.
// snowball:
// disable: false # optionally turn-off snowball archive transfer
// batch: 100 # up to this many objects per archive
// inmemory: true # indicates if the archive must be staged locally or in-memory
// compress: true # S2/Snappy compressed archive
// smallerThan: 5MiB # create archive for all objects smaller than 5MiB
// skipErrs: false # skips any source side read() errors

// BatchJobSnowball describes the snowball feature when replicating objects from a local source to a remote target
type BatchJobSnowball struct {
Disable *bool `yaml:"disable" json:"disable"`
Batch *int `yaml:"batch" json:"batch"`
InMemory *bool `yaml:"inmemory" json:"inmemory"`
Compress *bool `yaml:"compress" json:"compress"`
SmallerThan *string `yaml:"smallerThan" json:"smallerThan"`
SkipErrs *bool `yaml:"skipErrs" json:"skipErrs"`
}

// Validate the snowball parameters in the job description
func (b BatchJobSnowball) Validate() error {
if *b.Batch <= 0 {
return errors.New("batch count should be a positive value")
}
_, err := humanize.ParseBytes(*b.SmallerThan)
return err
}
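As a quick illustration (not from the diff), a snowball section can also be built programmatically with the same kind of generic pointer helper used by the handler defaults, and then checked with Validate; the values simply mirror the YAML sample above:

// Sketch only: assumes a generic ptr helper (func ptr[T any](v T) *T).
func exampleSnowball() error {
	sb := BatchJobSnowball{
		Disable:     ptr(false),
		Batch:       ptr(100),
		InMemory:    ptr(true),
		Compress:    ptr(true),
		SmallerThan: ptr("5MiB"),
		SkipErrs:    ptr(false),
	}
	return sb.Validate() // batch must be positive and smallerThan must parse as a byte size
}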

// BatchJobSizeFilter supports size based filters - LesserThan and GreaterThan
type BatchJobSizeFilter struct {
UpperBound BatchJobSize `yaml:"lessThan" json:"lessThan"`
LowerBound BatchJobSize `yaml:"greaterThan" json:"greaterThan"`
}

// InRange returns true in the following cases and false otherwise,
// - sf.LowerBound <= sz, when sf.LowerBound alone is specified
// - sz <= sf.UpperBound, when sf.UpperBound alone is specified
// - sf.LowerBound <= sz <= sf.UpperBound when both are specified
func (sf BatchJobSizeFilter) InRange(sz int64) bool {
if sf.UpperBound > 0 && sz > int64(sf.UpperBound) {
return false
}

if sf.LowerBound > 0 && sz < int64(sf.LowerBound) {
return false
}
return true
}
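A short illustrative use of the size filter (the bounds are arbitrary example values, and the fmt import is assumed):

// Sketch only: admits sizes of at least greaterThan and at most lessThan.
func exampleSizeFilter() {
	sf := BatchJobSizeFilter{
		UpperBound: 10 << 20, // lessThan: 10MiB
		LowerBound: 1 << 20,  // greaterThan: 1MiB
	}
	fmt.Println(sf.InRange(2 << 20))  // true: between the bounds
	fmt.Println(sf.InRange(11 << 20)) // false: larger than lessThan
	fmt.Println(sf.InRange(2 << 10))  // false: smaller than greaterThan
}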

var errInvalidBatchJobSizeFilter = errors.New("invalid batch-job size filter")

// Validate checks if sf is a valid batch-job size filter
func (sf BatchJobSizeFilter) Validate() error {
if sf.LowerBound > 0 && sf.UpperBound > 0 && sf.LowerBound >= sf.UpperBound {
return errInvalidBatchJobSizeFilter
}
return nil
}

// BatchJobSize supports humanized byte values in yaml files
type BatchJobSize int64

// UnmarshalYAML to parse humanized byte values
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
var batchExpireSz string
err := unmarshal(&batchExpireSz)
if err != nil {
return err
}
sz, err := humanize.ParseBytes(batchExpireSz)
if err != nil {
return err
}
*s = BatchJobSize(sz)
return nil
}
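A minimal sketch of parsing such a filter from YAML, assuming gopkg.in/yaml.v2 (whose Unmarshaler interface matches the UnmarshalYAML signature above); the inline document is illustrative only, not an official job spec:

// Sketch only: humanized sizes in the YAML become BatchJobSize values.
func exampleParseSizeFilter() (BatchJobSizeFilter, error) {
	doc := []byte("lessThan: 10MiB\ngreaterThan: 512KiB\n")
	var sf BatchJobSizeFilter
	err := yaml.Unmarshal(doc, &sf)
	return sf, err // sf.UpperBound == 10485760, sf.LowerBound == 524288
}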
|
||||
|
||||
@@ -389,3 +389,666 @@ func (z BatchJobRetry) Msgsize() (s int) {
|
||||
s = 1 + 9 + msgp.IntSize + 6 + msgp.DurationSize
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BatchJobSize) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
{
|
||||
var zb0001 int64
|
||||
zb0001, err = dc.ReadInt64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
(*z) = BatchJobSize(zb0001)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z BatchJobSize) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = en.WriteInt64(int64(z))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z BatchJobSize) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
o = msgp.AppendInt64(o, int64(z))
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobSize) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
{
|
||||
var zb0001 int64
|
||||
zb0001, bts, err = msgp.ReadInt64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
(*z) = BatchJobSize(zb0001)
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z BatchJobSize) Msgsize() (s int) {
|
||||
s = msgp.Int64Size
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BatchJobSizeFilter) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "UpperBound":
|
||||
{
|
||||
var zb0002 int64
|
||||
zb0002, err = dc.ReadInt64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "UpperBound")
|
||||
return
|
||||
}
|
||||
z.UpperBound = BatchJobSize(zb0002)
|
||||
}
|
||||
case "LowerBound":
|
||||
{
|
||||
var zb0003 int64
|
||||
zb0003, err = dc.ReadInt64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LowerBound")
|
||||
return
|
||||
}
|
||||
z.LowerBound = BatchJobSize(zb0003)
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z BatchJobSizeFilter) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 2
|
||||
// write "UpperBound"
|
||||
err = en.Append(0x82, 0xaa, 0x55, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt64(int64(z.UpperBound))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "UpperBound")
|
||||
return
|
||||
}
|
||||
// write "LowerBound"
|
||||
err = en.Append(0xaa, 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt64(int64(z.LowerBound))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LowerBound")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z BatchJobSizeFilter) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 2
|
||||
// string "UpperBound"
|
||||
o = append(o, 0x82, 0xaa, 0x55, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
|
||||
o = msgp.AppendInt64(o, int64(z.UpperBound))
|
||||
// string "LowerBound"
|
||||
o = append(o, 0xaa, 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
|
||||
o = msgp.AppendInt64(o, int64(z.LowerBound))
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobSizeFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "UpperBound":
|
||||
{
|
||||
var zb0002 int64
|
||||
zb0002, bts, err = msgp.ReadInt64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "UpperBound")
|
||||
return
|
||||
}
|
||||
z.UpperBound = BatchJobSize(zb0002)
|
||||
}
|
||||
case "LowerBound":
|
||||
{
|
||||
var zb0003 int64
|
||||
zb0003, bts, err = msgp.ReadInt64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LowerBound")
|
||||
return
|
||||
}
|
||||
z.LowerBound = BatchJobSize(zb0003)
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z BatchJobSizeFilter) Msgsize() (s int) {
|
||||
s = 1 + 11 + msgp.Int64Size + 11 + msgp.Int64Size
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BatchJobSnowball) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Disable":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Disable")
|
||||
return
|
||||
}
|
||||
z.Disable = nil
|
||||
} else {
|
||||
if z.Disable == nil {
|
||||
z.Disable = new(bool)
|
||||
}
|
||||
*z.Disable, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Disable")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Batch":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Batch")
|
||||
return
|
||||
}
|
||||
z.Batch = nil
|
||||
} else {
|
||||
if z.Batch == nil {
|
||||
z.Batch = new(int)
|
||||
}
|
||||
*z.Batch, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Batch")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "InMemory":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "InMemory")
|
||||
return
|
||||
}
|
||||
z.InMemory = nil
|
||||
} else {
|
||||
if z.InMemory == nil {
|
||||
z.InMemory = new(bool)
|
||||
}
|
||||
*z.InMemory, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "InMemory")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Compress":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Compress")
|
||||
return
|
||||
}
|
||||
z.Compress = nil
|
||||
} else {
|
||||
if z.Compress == nil {
|
||||
z.Compress = new(bool)
|
||||
}
|
||||
*z.Compress, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Compress")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "SmallerThan":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SmallerThan")
|
||||
return
|
||||
}
|
||||
z.SmallerThan = nil
|
||||
} else {
|
||||
if z.SmallerThan == nil {
|
||||
z.SmallerThan = new(string)
|
||||
}
|
||||
*z.SmallerThan, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SmallerThan")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "SkipErrs":
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SkipErrs")
|
||||
return
|
||||
}
|
||||
z.SkipErrs = nil
|
||||
} else {
|
||||
if z.SkipErrs == nil {
|
||||
z.SkipErrs = new(bool)
|
||||
}
|
||||
*z.SkipErrs, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SkipErrs")
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobSnowball) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// write "Disable"
|
||||
err = en.Append(0x86, 0xa7, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.Disable == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteBool(*z.Disable)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Disable")
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Batch"
|
||||
err = en.Append(0xa5, 0x42, 0x61, 0x74, 0x63, 0x68)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.Batch == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteInt(*z.Batch)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Batch")
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "InMemory"
|
||||
err = en.Append(0xa8, 0x49, 0x6e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.InMemory == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteBool(*z.InMemory)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "InMemory")
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Compress"
|
||||
err = en.Append(0xa8, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.Compress == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteBool(*z.Compress)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Compress")
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "SmallerThan"
|
||||
err = en.Append(0xab, 0x53, 0x6d, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.SmallerThan == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteString(*z.SmallerThan)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SmallerThan")
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "SkipErrs"
|
||||
err = en.Append(0xa8, 0x53, 0x6b, 0x69, 0x70, 0x45, 0x72, 0x72, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if z.SkipErrs == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteBool(*z.SkipErrs)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SkipErrs")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobSnowball) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// string "Disable"
|
||||
o = append(o, 0x86, 0xa7, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65)
|
||||
if z.Disable == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendBool(o, *z.Disable)
|
||||
}
|
||||
// string "Batch"
|
||||
o = append(o, 0xa5, 0x42, 0x61, 0x74, 0x63, 0x68)
|
||||
if z.Batch == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendInt(o, *z.Batch)
|
||||
}
|
||||
// string "InMemory"
|
||||
o = append(o, 0xa8, 0x49, 0x6e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79)
|
||||
if z.InMemory == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendBool(o, *z.InMemory)
|
||||
}
|
||||
// string "Compress"
|
||||
o = append(o, 0xa8, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73)
|
||||
if z.Compress == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendBool(o, *z.Compress)
|
||||
}
|
||||
// string "SmallerThan"
|
||||
o = append(o, 0xab, 0x53, 0x6d, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
if z.SmallerThan == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendString(o, *z.SmallerThan)
|
||||
}
|
||||
// string "SkipErrs"
|
||||
o = append(o, 0xa8, 0x53, 0x6b, 0x69, 0x70, 0x45, 0x72, 0x72, 0x73)
|
||||
if z.SkipErrs == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendBool(o, *z.SkipErrs)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobSnowball) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Disable":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.Disable = nil
|
||||
} else {
|
||||
if z.Disable == nil {
|
||||
z.Disable = new(bool)
|
||||
}
|
||||
*z.Disable, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Disable")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Batch":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.Batch = nil
|
||||
} else {
|
||||
if z.Batch == nil {
|
||||
z.Batch = new(int)
|
||||
}
|
||||
*z.Batch, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Batch")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "InMemory":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.InMemory = nil
|
||||
} else {
|
||||
if z.InMemory == nil {
|
||||
z.InMemory = new(bool)
|
||||
}
|
||||
*z.InMemory, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "InMemory")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Compress":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.Compress = nil
|
||||
} else {
|
||||
if z.Compress == nil {
|
||||
z.Compress = new(bool)
|
||||
}
|
||||
*z.Compress, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Compress")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "SmallerThan":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.SmallerThan = nil
|
||||
} else {
|
||||
if z.SmallerThan == nil {
|
||||
z.SmallerThan = new(string)
|
||||
}
|
||||
*z.SmallerThan, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SmallerThan")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "SkipErrs":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.SkipErrs = nil
|
||||
} else {
|
||||
if z.SkipErrs == nil {
|
||||
z.SkipErrs = new(bool)
|
||||
}
|
||||
*z.SkipErrs, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SkipErrs")
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobSnowball) Msgsize() (s int) {
|
||||
s = 1 + 8
|
||||
if z.Disable == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.BoolSize
|
||||
}
|
||||
s += 6
|
||||
if z.Batch == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.IntSize
|
||||
}
|
||||
s += 9
|
||||
if z.InMemory == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.BoolSize
|
||||
}
|
||||
s += 9
|
||||
if z.Compress == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.BoolSize
|
||||
}
|
||||
s += 12
|
||||
if z.SmallerThan == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.StringPrefixSize + len(*z.SmallerThan)
|
||||
}
|
||||
s += 9
|
||||
if z.SkipErrs == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.BoolSize
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -347,3 +347,229 @@ func BenchmarkDecodeBatchJobRetry(b *testing.B) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalBatchJobSizeFilter(t *testing.T) {
|
||||
v := BatchJobSizeFilter{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgBatchJobSizeFilter(b *testing.B) {
|
||||
v := BatchJobSizeFilter{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgBatchJobSizeFilter(b *testing.B) {
|
||||
v := BatchJobSizeFilter{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBatchJobSizeFilter(b *testing.B) {
|
||||
v := BatchJobSizeFilter{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeBatchJobSizeFilter(t *testing.T) {
|
||||
v := BatchJobSizeFilter{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBatchJobSizeFilter Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BatchJobSizeFilter{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBatchJobSizeFilter(b *testing.B) {
|
||||
v := BatchJobSizeFilter{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBatchJobSizeFilter(b *testing.B) {
|
||||
v := BatchJobSizeFilter{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalBatchJobSnowball(t *testing.T) {
|
||||
v := BatchJobSnowball{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeBatchJobSnowball(t *testing.T) {
|
||||
v := BatchJobSnowball{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBatchJobSnowball Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BatchJobSnowball{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cmd/batch-job-common-types_test.go (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
// Copyright (c) 2015-2023 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBatchJobSizeInRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
objSize int64
|
||||
sizeFilter BatchJobSizeFilter
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
// 1MiB < 2MiB < 10MiB -> in range
|
||||
objSize: 2 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
// 2KiB < 1 MiB -> out of range from left
|
||||
objSize: 2 << 10,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
// 11MiB > 10 MiB -> out of range from right
|
||||
objSize: 11 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
// 2MiB < 10MiB -> in range
|
||||
objSize: 2 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
// 2MiB > 1MiB -> in range
|
||||
objSize: 2 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) {
|
||||
if got := test.sizeFilter.InRange(test.objSize); got != test.want {
|
||||
t.Fatalf("Expected %v but got %v", test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchJobSizeValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
sizeFilter BatchJobSizeFilter
|
||||
err error
|
||||
}{
|
||||
{
|
||||
// Unspecified size filter is a valid filter
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 0,
|
||||
LowerBound: 0,
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 0,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 0,
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
// LowerBound > UpperBound -> empty range
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 1 << 20,
|
||||
LowerBound: 10 << 20,
|
||||
},
|
||||
err: errInvalidBatchJobSizeFilter,
|
||||
},
|
||||
{
|
||||
// LowerBound == UpperBound -> empty range
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 1 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
err: errInvalidBatchJobSizeFilter,
|
||||
},
|
||||
}
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) {
|
||||
if err := test.sizeFilter.Validate(); err != test.err {
|
||||
t.Fatalf("Expected %v but got %v", test.err, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -155,6 +155,7 @@ type BatchJobReplicateSource struct {
|
||||
Endpoint string `yaml:"endpoint" json:"endpoint"`
|
||||
Path string `yaml:"path" json:"path"`
|
||||
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
|
||||
Snowball BatchJobSnowball `yaml:"snowball" json:"snowball"`
|
||||
}
|
||||
|
||||
// ValidPath returns true if path is valid
|
||||
|
||||
@@ -469,6 +469,12 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
case "Snowball":
|
||||
err = z.Snowball.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
@@ -482,9 +488,9 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// map header, size 7
|
||||
// write "Type"
|
||||
err = en.Append(0x86, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
err = en.Append(0x87, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -569,15 +575,25 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "Creds", "SessionToken")
|
||||
return
|
||||
}
|
||||
// write "Snowball"
|
||||
err = en.Append(0xa8, 0x53, 0x6e, 0x6f, 0x77, 0x62, 0x61, 0x6c, 0x6c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = z.Snowball.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// map header, size 7
|
||||
// string "Type"
|
||||
o = append(o, 0x86, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
o = append(o, 0x87, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
o = msgp.AppendString(o, string(z.Type))
|
||||
// string "Bucket"
|
||||
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
|
||||
@@ -603,6 +619,13 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
// string "SessionToken"
|
||||
o = append(o, 0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
|
||||
o = msgp.AppendString(o, z.Creds.SessionToken)
|
||||
// string "Snowball"
|
||||
o = append(o, 0xa8, 0x53, 0x6e, 0x6f, 0x77, 0x62, 0x61, 0x6c, 0x6c)
|
||||
o, err = z.Snowball.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -699,6 +722,12 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "Snowball":
|
||||
bts, err = z.Snowball.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
@@ -713,7 +742,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobReplicateSource) Msgsize() (s int) {
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken)
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -154,7 +154,6 @@ type BatchJobKeyRotateV1 struct {
|
||||
Flags BatchJobKeyRotateFlags `yaml:"flags" json:"flags"`
|
||||
Bucket string `yaml:"bucket" json:"bucket"`
|
||||
Prefix string `yaml:"prefix" json:"prefix"`
|
||||
Endpoint string `yaml:"endpoint" json:"endpoint"`
|
||||
Encryption BatchJobKeyRotateEncryption `yaml:"encryption" json:"encryption"`
|
||||
}
|
||||
|
||||
@@ -261,6 +260,9 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
if err := ri.load(ctx, api, job); err != nil {
|
||||
return err
|
||||
}
|
||||
if ri.Complete {
|
||||
return nil
|
||||
}
|
||||
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
lastObject := ri.Object
|
||||
@@ -356,9 +358,9 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
results := make(chan ObjectInfo, 100)
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, ObjectOptions{
|
||||
WalkMarker: lastObject,
|
||||
WalkFilter: skip,
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
|
||||
Marker: lastObject,
|
||||
Filter: skip,
|
||||
}); err != nil {
|
||||
cancel()
|
||||
// Do not need to retry if we can't list objects on source.
|
||||
@@ -377,14 +379,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
defer wk.Give()
|
||||
for attempts := 1; attempts <= retryAttempts; attempts++ {
|
||||
attempts := attempts
|
||||
stopFn := globalBatchJobsMetrics.trace(batchKeyRotationMetricObject, job.ID, attempts, result)
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
|
||||
success := true
|
||||
if err := r.KeyRotate(ctx, api, result); err != nil {
|
||||
stopFn(err)
|
||||
stopFn(result, err)
|
||||
logger.LogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(nil)
|
||||
stopFn(result, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Bucket, result, success)
|
||||
ri.RetryAttempts = attempts
|
||||
@@ -398,6 +400,10 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
|
||||
}
|
||||
}
|
||||
|
||||
if wait := globalBatchConfig.KeyRotationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wk.Wait()
|
||||
|
||||
@@ -409,12 +409,6 @@ func (z *BatchJobKeyRotateV1) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
case "Endpoint":
|
||||
z.Endpoint, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
case "Encryption":
|
||||
err = z.Encryption.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
@@ -434,9 +428,9 @@ func (z *BatchJobKeyRotateV1) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobKeyRotateV1) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// map header, size 5
|
||||
// write "APIVersion"
|
||||
err = en.Append(0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
err = en.Append(0x85, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -501,16 +495,6 @@ func (z *BatchJobKeyRotateV1) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
// write "Endpoint"
|
||||
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Endpoint)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
// write "Encryption"
|
||||
err = en.Append(0xaa, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e)
|
||||
if err != nil {
|
||||
@@ -527,9 +511,9 @@ func (z *BatchJobKeyRotateV1) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobKeyRotateV1) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// map header, size 5
|
||||
// string "APIVersion"
|
||||
o = append(o, 0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
o = append(o, 0x85, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
o = msgp.AppendString(o, z.APIVersion)
|
||||
// string "Flags"
|
||||
o = append(o, 0xa5, 0x46, 0x6c, 0x61, 0x67, 0x73)
|
||||
@@ -561,9 +545,6 @@ func (z *BatchJobKeyRotateV1) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
// string "Prefix"
|
||||
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
|
||||
o = msgp.AppendString(o, z.Prefix)
|
||||
// string "Endpoint"
|
||||
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
o = msgp.AppendString(o, z.Endpoint)
|
||||
// string "Encryption"
|
||||
o = append(o, 0xaa, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e)
|
||||
o, err = z.Encryption.MarshalMsg(o)
|
||||
@@ -651,12 +632,6 @@ func (z *BatchJobKeyRotateV1) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
case "Endpoint":
|
||||
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
case "Encryption":
|
||||
bts, err = z.Encryption.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
@@ -677,7 +652,7 @@ func (z *BatchJobKeyRotateV1) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobKeyRotateV1) Msgsize() (s int) {
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 6 + 1 + 7 + z.Flags.Filter.Msgsize() + 7 + z.Flags.Notify.Msgsize() + 6 + z.Flags.Retry.Msgsize() + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 11 + z.Encryption.Msgsize()
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 6 + 1 + 7 + z.Flags.Filter.Msgsize() + 7 + z.Flags.Notify.Msgsize() + 6 + z.Flags.Retry.Msgsize() + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 11 + z.Encryption.Msgsize()
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -8,13 +8,14 @@ func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[batchReplicationMetricObject-0]
|
||||
_ = x[batchKeyRotationMetricObject-1]
|
||||
_ = x[batchJobMetricReplication-0]
|
||||
_ = x[batchJobMetricKeyRotation-1]
|
||||
_ = x[batchJobMetricExpire-2]
|
||||
}
|
||||
|
||||
const _batchJobMetric_name = "batchReplicationMetricObjectbatchKeyRotationMetricObject"
|
||||
const _batchJobMetric_name = "ReplicationKeyRotationExpire"
|
||||
|
||||
var _batchJobMetric_index = [...]uint8{0, 28, 56}
|
||||
var _batchJobMetric_index = [...]uint8{0, 11, 22, 28}
|
||||
|
||||
func (i batchJobMetric) String() string {
|
||||
if i >= batchJobMetric(len(_batchJobMetric_index)-1) {
|
||||
|
||||
@@ -95,7 +95,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
|
||||
h := algo.New()
|
||||
|
||||
bw := &streamingBitrotWriter{
|
||||
iow: ioutil.NewDeadlineWriter(w, diskMaxTimeout),
|
||||
iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
|
||||
closeWithErr: w.CloseWithError,
|
||||
h: h,
|
||||
shardSize: shardSize,
|
||||
|
||||
@@ -20,12 +20,14 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
@@ -82,23 +84,40 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
|
||||
ep.Platform, s2.MinioEndpoints[i].Platform)
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
|
||||
var missing []string
|
||||
var mismatching []string
|
||||
for k, v := range s1.MinioEnv {
|
||||
ev, ok := s2.MinioEnv[k]
|
||||
if !ok {
|
||||
missing = append(missing, k)
|
||||
} else if v != ev {
|
||||
mismatching = append(mismatching, k)
|
||||
}
|
||||
}
|
||||
if len(mismatching) > 0 {
|
||||
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s / Mismatch environment values: %s`, missing, mismatching)
|
||||
}
|
||||
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s`, missing)
|
||||
if reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
// Report differences in environment variables.
|
||||
var missing []string
|
||||
var mismatching []string
|
||||
for k, v := range s1.MinioEnv {
|
||||
ev, ok := s2.MinioEnv[k]
|
||||
if !ok {
|
||||
missing = append(missing, k)
|
||||
} else if v != ev {
|
||||
mismatching = append(mismatching, k)
|
||||
}
|
||||
}
|
||||
var extra []string
|
||||
for k := range s2.MinioEnv {
|
||||
_, ok := s1.MinioEnv[k]
|
||||
if !ok {
|
||||
extra = append(extra, k)
|
||||
}
|
||||
}
|
||||
msg := "Expected same MINIO_ environment variables and values across all servers: "
|
||||
if len(missing) > 0 {
|
||||
msg += fmt.Sprintf(`Missing environment values: %v. `, missing)
|
||||
}
|
||||
if len(mismatching) > 0 {
|
||||
msg += fmt.Sprintf(`Mismatching environment values: %v. `, mismatching)
|
||||
}
|
||||
if len(extra) > 0 {
|
||||
msg += fmt.Sprintf(`Extra environment values: %v. `, extra)
|
||||
}
|
||||
|
||||
return errors.New(strings.TrimSpace(msg))
|
||||
}
|
||||
|
||||
var skipEnvs = map[string]struct{}{
|
||||
|
||||
@@ -51,6 +51,7 @@ import (
|
||||
sse "github.com/minio/minio/internal/bucket/encryption"
|
||||
objectlock "github.com/minio/minio/internal/bucket/object/lock"
|
||||
"github.com/minio/minio/internal/bucket/replication"
|
||||
"github.com/minio/minio/internal/config/cache"
|
||||
"github.com/minio/minio/internal/config/dns"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/event"
|
||||
@@ -474,9 +475,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
}
|
||||
|
||||
deleteObjectsFn := objectAPI.DeleteObjects
|
||||
if api.CacheAPI() != nil {
|
||||
deleteObjectsFn = api.CacheAPI().DeleteObjects
|
||||
}
|
||||
|
||||
// Return Malformed XML as S3 spec if the number of objects is empty
|
||||
if len(deleteObjectsReq.Objects) == 0 || len(deleteObjectsReq.Objects) > maxDeleteList {
|
||||
@@ -486,9 +484,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
|
||||
objectsToDelete := map[ObjectToDelete]int{}
|
||||
getObjectInfoFn := objectAPI.GetObjectInfo
|
||||
if api.CacheAPI() != nil {
|
||||
getObjectInfoFn = api.CacheAPI().GetObjectInfo
|
||||
}
|
||||
|
||||
var (
|
||||
hasLockEnabled bool
|
||||
@@ -674,6 +669,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
continue
|
||||
}
|
||||
|
||||
defer globalCacheConfig.Delete(bucket, dobj.ObjectName)
|
||||
|
||||
if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
|
||||
// copy so we can re-add null ID.
|
||||
dobj := dobj
|
||||
@@ -716,7 +713,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
|
||||
continue
|
||||
}
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
os.Sweep()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -794,7 +791,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
|
||||
// check if client is attempting to create more buckets, complain about it.
|
||||
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
|
||||
logger.LogIf(ctx, fmt.Errorf("An attempt to create %d buckets beyond recommended %d", currBuckets+1, maxBuckets))
|
||||
logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets))
|
||||
}
|
||||
|
||||
opts := MakeBucketOptions{
|
||||
@@ -989,7 +986,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
if fileName == "" {
|
||||
if name != "file" {
|
||||
if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") {
|
||||
dec := json.NewDecoder(part)
|
||||
|
||||
@@ -1045,11 +1042,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
break
|
||||
}
|
||||
|
||||
if _, ok := formValues["Key"]; !ok {
|
||||
if keyName, ok := formValues["Key"]; !ok {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
} else if fileName == "" && len(keyName) >= 1 {
|
||||
// if we can't get fileName. We use keyName[0] to fileName
|
||||
fileName = keyName[0]
|
||||
}
|
||||
|
||||
if fileName == "" {
|
||||
@@ -1139,9 +1139,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
hashReader, err := hash.NewReader(reader, fileSize, "", "", fileSize)
|
||||
hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -1196,7 +1195,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
})
|
||||
|
||||
var opts ObjectOptions
|
||||
opts, err = putOpts(ctx, r, bucket, object, metadata)
|
||||
opts, err = putOptsFromReq(ctx, r, bucket, object, metadata)
|
||||
if err != nil {
|
||||
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
|
||||
return
|
||||
@@ -1254,7 +1253,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
// do not try to verify encrypted content/
|
||||
hashReader, err = hash.NewReader(reader, -1, "", "", -1)
|
||||
hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
@@ -1347,6 +1346,22 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
continue
|
||||
}
|
||||
|
||||
asize, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
asize = objInfo.Size
|
||||
}
|
||||
|
||||
globalCacheConfig.Set(&cache.ObjectInfo{
|
||||
Key: objInfo.Name,
|
||||
Bucket: objInfo.Bucket,
|
||||
ETag: getDecryptedETag(formValues, objInfo, false),
|
||||
ModTime: objInfo.ModTime,
|
||||
Expires: objInfo.Expires.UTC().Format(http.TimeFormat),
|
||||
CacheControl: objInfo.CacheControl,
|
||||
Metadata: cleanReservedKeys(objInfo.UserDefined),
|
||||
Size: asize,
|
||||
})
|
||||
|
||||
fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{
|
||||
Key: objInfo.Name,
|
||||
ETag: getDecryptedETag(formValues, objInfo, false),
|
||||
@@ -1403,10 +1418,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
etag := getDecryptedETag(formValues, objInfo, false)
|
||||
|
||||
// We must not use the http.Header().Set method here because some (broken)
|
||||
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
|
||||
// Therefore, we have to set the ETag directly as map entry.
|
||||
w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
|
||||
w.Header()[xhttp.ETag] = []string{`"` + etag + `"`}
|
||||
|
||||
// Set the relevant version ID as part of the response header.
|
||||
if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID {
|
||||
@@ -1417,6 +1434,22 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
w.Header().Set(xhttp.Location, obj)
|
||||
}
|
||||
|
||||
asize, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
asize = objInfo.Size
|
||||
}
|
||||
|
||||
defer globalCacheConfig.Set(&cache.ObjectInfo{
|
||||
Key: objInfo.Name,
|
||||
Bucket: objInfo.Bucket,
|
||||
ETag: etag,
|
||||
ModTime: objInfo.ModTime,
|
||||
Expires: objInfo.ExpiresStr(),
|
||||
CacheControl: objInfo.CacheControl,
|
||||
Metadata: cleanReservedKeys(objInfo.UserDefined),
|
||||
Size: asize,
|
||||
})
|
||||
|
||||
// Notify object created event.
|
||||
defer sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedPost,
|
||||
|
||||
@@ -25,6 +25,7 @@ type lcEventSrc uint8
//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
const (
lcEventSrc_None lcEventSrc = iota
lcEventSrc_Heal
lcEventSrc_Scanner
lcEventSrc_Decom
lcEventSrc_Rebal

@@ -22,6 +22,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/bucket/lifecycle"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
@@ -86,6 +87,41 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
|
||||
return
|
||||
}
|
||||
|
||||
// Create a map of updated set of rules in request
|
||||
updatedRules := make(map[string]lifecycle.Rule, len(bucketLifecycle.Rules))
|
||||
for _, rule := range bucketLifecycle.Rules {
|
||||
updatedRules[rule.ID] = rule
|
||||
}
|
||||
|
||||
// Get list of rules for the bucket from disk
|
||||
meta, err := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
expiryRuleRemoved := false
|
||||
if len(meta.LifecycleConfigXML) > 0 {
|
||||
var lcCfg lifecycle.Lifecycle
|
||||
if err := xml.Unmarshal(meta.LifecycleConfigXML, &lcCfg); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
for _, rl := range lcCfg.Rules {
|
||||
updRule, ok := updatedRules[rl.ID]
|
||||
// original rule had expiry that is no longer in the new config,
|
||||
// or rule is present but missing expiration flags
|
||||
if (!rl.Expiration.IsNull() || !rl.NoncurrentVersionExpiration.IsNull()) &&
|
||||
(!ok || (updRule.Expiration.IsNull() && updRule.NoncurrentVersionExpiration.IsNull())) {
|
||||
expiryRuleRemoved = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if bucketLifecycle.HasExpiry() || expiryRuleRemoved {
|
||||
currtime := time.Now()
|
||||
bucketLifecycle.ExpiryUpdatedAt = &currtime
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(bucketLifecycle)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
@@ -142,6 +178,8 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
// explicitly set ExpiryUpdatedAt nil as its meant for internal consumption only
|
||||
config.ExpiryUpdatedAt = nil
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
|
||||
@@ -42,7 +42,9 @@ import (
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/minio/internal/s3select"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
xnet "github.com/minio/pkg/v2/net"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"github.com/zeebo/xxh3"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -217,17 +219,24 @@ type transitionState struct {
numWorkers int
killCh chan struct{}

activeTasks int32
activeTasks atomic.Int64
missedImmediateTasks atomic.Int64

lastDayMu sync.RWMutex
lastDayStats map[string]*lastDayTierStats
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo, event lifecycle.Event, src lcEventSrc) {
task := transitionTask{objInfo: oi, event: event, src: src}
select {
case <-t.ctx.Done():
case t.transitionCh <- transitionTask{objInfo: oi, event: event, src: src}:
case t.transitionCh <- task:
default:
switch src {
case lcEventSrc_s3PutObject, lcEventSrc_s3CopyObject, lcEventSrc_s3CompleteMultipartUpload:
// Update missed immediate tasks only for incoming requests.
t.missedImmediateTasks.Add(1)
}
}
}

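A minimal sketch of the enqueue pattern this hunk adopts: a buffered channel is filled with a non-blocking send, and work that cannot be queued right away is counted so a background process (the scanner, in this case) can pick it up later instead of blocking the request path. The queue type below is illustrative, not MinIO's transitionState.

package main

import (
	"fmt"
	"sync/atomic"
)

// queue holds pending tasks plus a counter of tasks that could not be
// queued immediately because the channel was full.
type queue struct {
	tasks  chan string
	missed atomic.Int64
}

func (q *queue) enqueue(task string) {
	select {
	case q.tasks <- task: // queued for an idle worker
	default:
		q.missed.Add(1) // channel full: defer, do not block the caller
	}
}

func main() {
	q := &queue{tasks: make(chan string, 2)}
	for _, t := range []string{"a", "b", "c", "d"} {
		q.enqueue(t)
	}
	fmt.Println("queued:", len(q.tasks), "missed:", q.missed.Load())
}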
@@ -237,7 +246,7 @@ var globalTransitionState *transitionState
|
||||
// via its Init method.
|
||||
func newTransitionState(ctx context.Context) *transitionState {
|
||||
return &transitionState{
|
||||
transitionCh: make(chan transitionTask, 10000),
|
||||
transitionCh: make(chan transitionTask, 100000),
|
||||
ctx: ctx,
|
||||
killCh: make(chan struct{}),
|
||||
lastDayStats: make(map[string]*lastDayTierStats),
|
||||
@@ -262,8 +271,14 @@ func (t *transitionState) PendingTasks() int {
|
||||
}
|
||||
|
||||
// ActiveTasks returns the number of active (ongoing) ILM transition tasks.
|
||||
func (t *transitionState) ActiveTasks() int {
|
||||
return int(atomic.LoadInt32(&t.activeTasks))
|
||||
func (t *transitionState) ActiveTasks() int64 {
|
||||
return t.activeTasks.Load()
|
||||
}
|
||||
|
||||
// MissedImmediateTasks returns the number of tasks - deferred to scanner due
|
||||
// to tasks channel being backlogged.
|
||||
func (t *transitionState) MissedImmediateTasks() int64 {
|
||||
return t.missedImmediateTasks.Load()
|
||||
}
|
||||
|
||||
// worker waits for transition tasks
|
||||
@@ -278,11 +293,13 @@ func (t *transitionState) worker(objectAPI ObjectLayer) {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
atomic.AddInt32(&t.activeTasks, 1)
|
||||
t.activeTasks.Add(1)
|
||||
if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil {
|
||||
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
|
||||
logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
|
||||
task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err))
|
||||
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) {
|
||||
if !strings.Contains(err.Error(), "use of closed network connection") {
|
||||
logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
|
||||
task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ts := tierStats{
|
||||
@@ -294,7 +311,7 @@ func (t *transitionState) worker(objectAPI ObjectLayer) {
|
||||
}
|
||||
t.addLastDayStats(task.event.StorageClass, ts)
|
||||
}
|
||||
atomic.AddInt32(&t.activeTasks, -1)
|
||||
t.activeTasks.Add(-1)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -444,7 +461,8 @@ func genTransitionObjName(bucket string) (string, error) {
return "", err
}
us := u.String()
obj := fmt.Sprintf("%s/%s/%s/%s/%s", globalDeploymentID, bucket, us[0:2], us[2:4], us)
hash := xxh3.HashString(pathJoin(globalDeploymentID(), bucket))
obj := fmt.Sprintf("%s/%s/%s/%s", strconv.FormatUint(hash, 16), us[0:2], us[2:4], us)
return obj, nil
}

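A standalone sketch of the naming scheme in the hunk above: the deployment-ID/bucket pair is collapsed into a fixed xxh3 prefix, and a per-object UUID supplies two fan-out path segments plus the leaf name. The github.com/google/uuid import and the path-join spelling are assumptions for the sake of a runnable example; the xxh3 call matches the package already imported in this file.

package main

import (
	"fmt"
	"strconv"

	"github.com/google/uuid" // assumption: any UUID source works here
	"github.com/zeebo/xxh3"
)

// transitionObjName derives a stable per-deployment prefix and a unique,
// well-distributed object key for a transitioned object.
func transitionObjName(deploymentID, bucket string) string {
	us := uuid.New().String()
	hash := xxh3.HashString(deploymentID + "/" + bucket)
	return fmt.Sprintf("%s/%s/%s/%s", strconv.FormatUint(hash, 16), us[0:2], us[2:4], us)
}

func main() {
	fmt.Println(transitionObjName("deployment-id-1234", "mybucket"))
}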
@@ -862,6 +880,7 @@ func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts {
|
||||
UserTags: oi.UserTags,
|
||||
VersionID: oi.VersionID,
|
||||
ModTime: oi.ModTime,
|
||||
Size: oi.Size,
|
||||
IsLatest: oi.IsLatest,
|
||||
NumVersions: oi.NumVersions,
|
||||
DeleteMarker: oi.DeleteMarker,
|
||||
|
||||
@@ -19,6 +19,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
@@ -177,6 +178,40 @@ func (sys *BucketMetadataSys) save(ctx context.Context, meta BucketMetadata) err
|
||||
// Delete delete the bucket metadata for the specified bucket.
|
||||
// must be used by all callers instead of using Update() with nil configData.
|
||||
func (sys *BucketMetadataSys) Delete(ctx context.Context, bucket string, configFile string) (updatedAt time.Time, err error) {
|
||||
if configFile == bucketLifecycleConfig {
|
||||
// Get bucket config from current site
|
||||
meta, e := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
|
||||
if e != nil && !errors.Is(e, errConfigNotFound) {
|
||||
return updatedAt, e
|
||||
}
|
||||
var expiryRuleRemoved bool
|
||||
if len(meta.LifecycleConfigXML) > 0 {
|
||||
var lcCfg lifecycle.Lifecycle
|
||||
if err := xml.Unmarshal(meta.LifecycleConfigXML, &lcCfg); err != nil {
|
||||
return updatedAt, err
|
||||
}
|
||||
// find a single expiry rule set the flag
|
||||
for _, rl := range lcCfg.Rules {
|
||||
if !rl.Expiration.IsNull() || !rl.NoncurrentVersionExpiration.IsNull() {
|
||||
expiryRuleRemoved = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Form empty ILM details with `ExpiryUpdatedAt` field and save
|
||||
var cfgData []byte
|
||||
if expiryRuleRemoved {
|
||||
var lcCfg lifecycle.Lifecycle
|
||||
currtime := time.Now()
|
||||
lcCfg.ExpiryUpdatedAt = &currtime
|
||||
cfgData, err = xml.Marshal(lcCfg)
|
||||
if err != nil {
|
||||
return updatedAt, err
|
||||
}
|
||||
}
|
||||
return sys.updateAndParse(ctx, bucket, configFile, cfgData, false)
|
||||
}
|
||||
return sys.updateAndParse(ctx, bucket, configFile, nil, false)
|
||||
}
|
||||
|
||||
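To illustrate the intent of the Delete path above: when the lifecycle configuration being removed carried an expiry rule, an otherwise-empty configuration recording only ExpiryUpdatedAt is persisted instead of dropping all state. The sketch below uses simplified stand-in types; the real definitions live in internal/bucket/lifecycle and carry many more fields.

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// Simplified stand-ins for the lifecycle types referenced above.
type Expiration struct {
	Days int `xml:"Days,omitempty"`
}

func (e Expiration) IsNull() bool { return e.Days == 0 }

type Rule struct {
	ID         string     `xml:"ID"`
	Expiration Expiration `xml:"Expiration"`
}

type Lifecycle struct {
	XMLName         xml.Name   `xml:"LifecycleConfiguration"`
	Rules           []Rule     `xml:"Rule"`
	ExpiryUpdatedAt *time.Time `xml:"ExpiryUpdatedAt,omitempty"`
}

// onDeleteLifecycle mirrors the idea in the hunk above: if the configuration
// being deleted had an expiry rule, keep a config that only records when
// expiry settings last changed, instead of removing all lifecycle state.
func onDeleteLifecycle(old Lifecycle) ([]byte, error) {
	for _, rl := range old.Rules {
		if !rl.Expiration.IsNull() {
			now := time.Now()
			return xml.Marshal(Lifecycle{ExpiryUpdatedAt: &now})
		}
	}
	return nil, nil // nothing to preserve
}

func main() {
	old := Lifecycle{Rules: []Rule{{ID: "expire-old", Expiration: Expiration{Days: 30}}}}
	out, _ := onDeleteLifecycle(old)
	fmt.Println(string(out))
}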
@@ -267,7 +302,10 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
|
||||
}
|
||||
return nil, time.Time{}, err
|
||||
}
|
||||
if meta.lifecycleConfig == nil {
|
||||
// there could be just `ExpiryUpdatedAt` field populated as part
|
||||
// of last delete all. Treat this situation as not lifecycle configuration
|
||||
// available
|
||||
if meta.lifecycleConfig == nil || len(meta.lifecycleConfig.Rules) == 0 {
|
||||
return nil, time.Time{}, BucketLifecycleNotFound{Bucket: bucket}
|
||||
}
|
||||
return meta.lifecycleConfig, meta.LifecycleConfigUpdatedAt, nil
|
||||
@@ -437,21 +475,8 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sys *BucketMetadataSys) loadBucketMetadata(ctx context.Context, bucket BucketInfo) error {
|
||||
meta, err := loadBucketMetadata(ctx, sys.objAPI, bucket.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
sys.metadataMap[bucket.Name] = meta
|
||||
sys.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// concurrently load bucket metadata to speed up loading bucket metadata.
|
||||
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo) {
|
||||
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, failedBuckets map[string]struct{}) {
|
||||
g := errgroup.WithNErrs(len(buckets))
|
||||
bucketMetas := make([]BucketMetadata, len(buckets))
|
||||
for index := range buckets {
|
||||
@@ -491,6 +516,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
|
||||
for i, meta := range bucketMetas {
|
||||
if errs[i] != nil {
|
||||
if failedBuckets == nil {
|
||||
failedBuckets = make(map[string]struct{})
|
||||
}
|
||||
failedBuckets[buckets[i].Name] = struct{}{}
|
||||
continue
|
||||
}
|
||||
globalEventNotifier.set(buckets[i], meta) // set notification targets
|
||||
@@ -498,7 +527,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
|
||||
}
|
||||
}
|
||||
|
||||
func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
|
||||
func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, failedBuckets map[string]struct{}) {
|
||||
const bucketMetadataRefresh = 15 * time.Minute
|
||||
|
||||
sleeper := newDynamicSleeper(2, 150*time.Millisecond, false)
|
||||
@@ -513,7 +542,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
|
||||
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
break
|
||||
}
|
||||
|
||||
// Handle if we have some buckets in-memory those are stale.
|
||||
@@ -525,38 +554,49 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
|
||||
}
|
||||
sys.RemoveStaleBuckets(diskBuckets)
|
||||
|
||||
for _, bucket := range buckets {
|
||||
for i := range buckets {
|
||||
wait := sleeper.Timer(ctx)
|
||||
|
||||
err := sys.loadBucketMetadata(ctx, bucket)
|
||||
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
wait() // wait to proceed to next entry.
|
||||
continue
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
sys.metadataMap[buckets[i].Name] = meta
|
||||
sys.Unlock()
|
||||
|
||||
// Initialize the failed buckets
|
||||
if _, ok := failedBuckets[buckets[i].Name]; ok {
|
||||
globalEventNotifier.set(buckets[i], meta)
|
||||
globalBucketTargetSys.set(buckets[i], meta)
|
||||
delete(failedBuckets, buckets[i].Name)
|
||||
}
|
||||
|
||||
wait() // wait to proceed to next entry.
|
||||
}
|
||||
|
||||
t.Reset(bucketMetadataRefresh)
|
||||
}
|
||||
t.Reset(bucketMetadataRefresh)
|
||||
}
|
||||
}
|
||||
|
||||
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
count := 100 // load 100 bucket metadata at a time.
failedBuckets := make(map[string]struct{})
for {
if len(buckets) < count {
sys.concurrentLoad(ctx, buckets)
sys.concurrentLoad(ctx, buckets, failedBuckets)
break
}
sys.concurrentLoad(ctx, buckets[:count])
sys.concurrentLoad(ctx, buckets[:count], failedBuckets)
buckets = buckets[count:]
}

if globalIsDistErasure {
go sys.refreshBucketsMetadataLoop(ctx)
go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
}
}

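A small, generic sketch of the load-then-retry pattern introduced here: items are loaded in fixed-size batches, failures are remembered in a set, and a background refresher can retry them later. Names below are illustrative, not MinIO's API.

package main

import (
	"fmt"
	"sync"
)

// loadInChunks loads items in batches of the given size and returns the set
// of items that failed, which a refresh loop would retry on its next pass.
func loadInChunks(items []string, chunk int, load func(string) error) map[string]struct{} {
	failed := make(map[string]struct{})
	var mu sync.Mutex
	for len(items) > 0 {
		n := chunk
		if len(items) < n {
			n = len(items)
		}
		var wg sync.WaitGroup
		for _, it := range items[:n] {
			wg.Add(1)
			go func(it string) {
				defer wg.Done()
				if err := load(it); err != nil {
					mu.Lock()
					failed[it] = struct{}{}
					mu.Unlock()
				}
			}(it)
		}
		wg.Wait()
		items = items[n:]
	}
	return failed
}

func main() {
	failed := loadInChunks([]string{"b1", "b2", "b3"}, 2, func(name string) error {
		if name == "b2" {
			return fmt.Errorf("transient error loading %s", name)
		}
		return nil
	})
	fmt.Println("retry later:", failed) // the refresh loop would retry these
}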
@@ -137,6 +137,10 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
|
||||
}
|
||||
|
||||
cloneHeader := r.Header.Clone()
|
||||
if v := cloneHeader.Get("x-amz-signature-age"); v != "" {
|
||||
args["signatureAge"] = []string{v}
|
||||
cloneHeader.Del("x-amz-signature-age")
|
||||
}
|
||||
|
||||
if userTags := cloneHeader.Get(xhttp.AmzObjectTagging); userTags != "" {
|
||||
tag, _ := tags.ParseObjectTags(userTags)
|
||||
|
||||
@@ -29,9 +29,7 @@ import (
|
||||
)
|
||||
|
||||
// BucketQuotaSys - map of bucket and quota configuration.
|
||||
type BucketQuotaSys struct {
|
||||
bucketStorageCache timedValue
|
||||
}
|
||||
type BucketQuotaSys struct{}
|
||||
|
||||
// Get - Get quota configuration.
|
||||
func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) {
|
||||
@@ -44,16 +42,18 @@ func NewBucketQuotaSys() *BucketQuotaSys {
|
||||
return &BucketQuotaSys{}
|
||||
}
|
||||
|
||||
var bucketStorageCache timedValue
|
||||
|
||||
// Init initialize bucket quota.
|
||||
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
|
||||
sys.bucketStorageCache.Once.Do(func() {
|
||||
bucketStorageCache.Once.Do(func() {
|
||||
// Set this to 10 secs since its enough, as scanner
|
||||
// does not update the bucket usage values frequently.
|
||||
sys.bucketStorageCache.TTL = 10 * time.Second
|
||||
bucketStorageCache.TTL = 10 * time.Second
|
||||
// Rely on older value if usage loading fails from disk.
|
||||
sys.bucketStorageCache.Relax = true
|
||||
sys.bucketStorageCache.Update = func() (interface{}, error) {
|
||||
ctx, done := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
bucketStorageCache.Relax = true
|
||||
bucketStorageCache.Update = func() (interface{}, error) {
|
||||
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer done()
|
||||
|
||||
return loadDataUsageFromBackend(ctx, objAPI)
|
||||
@@ -63,16 +63,17 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
|
||||
|
||||
// GetBucketUsageInfo return bucket usage info for a given bucket
|
||||
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
|
||||
v, err := sys.bucketStorageCache.Get()
|
||||
if err != nil && v != nil {
|
||||
logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
|
||||
}
|
||||
if v == nil {
|
||||
logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
|
||||
v, err := bucketStorageCache.Get()
|
||||
timedout := OperationTimedOut{}
|
||||
if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
|
||||
if v != nil {
|
||||
logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
|
||||
} else {
|
||||
logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
|
||||
}
|
||||
}
|
||||
|
||||
var bui BucketUsageInfo
|
||||
|
||||
dui, ok := v.(DataUsageInfo)
|
||||
if ok {
|
||||
bui = dui.BucketsUsage[bucket]
|
||||
|
||||
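A rough, self-contained sketch of the timedValue pattern this hunk moves to a package-level cache: one shared value with a TTL, an Update function, and a relaxed mode that keeps serving the previous value when a refresh fails. The cachedValue type and the sample loader below are illustrative stand-ins, not MinIO's timedValue implementation.

package main

import (
	"fmt"
	"sync"
	"time"
)

// cachedValue serves a cached value while it is fresh and otherwise calls
// update; on update failure it falls back to the stale value.
type cachedValue struct {
	mu     sync.Mutex
	ttl    time.Duration
	last   time.Time
	value  interface{}
	update func() (interface{}, error)
}

func (c *cachedValue) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value != nil && time.Since(c.last) < c.ttl {
		return c.value, nil
	}
	v, err := c.update()
	if err != nil {
		// Relax: keep the stale value, surfacing the error to the caller.
		return c.value, err
	}
	c.value, c.last = v, time.Now()
	return v, nil
}

var usageCache = cachedValue{
	ttl: 10 * time.Second, // the scanner updates usage infrequently, so a short TTL is enough
	update: func() (interface{}, error) {
		return map[string]uint64{"mybucket": 1 << 20}, nil // pretend to load usage from the backend
	},
}

func main() {
	v, err := usageCache.Get()
	fmt.Println(v, err)
}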
@@ -476,27 +476,17 @@ func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.Response
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
var tgtStats map[string]TargetReplicationResyncStatus
|
||||
globalReplicationPool.resyncer.RLock()
|
||||
brs, ok := globalReplicationPool.resyncer.statusMap[bucket]
|
||||
if ok {
|
||||
tgtStats = brs.cloneTgtStats()
|
||||
}
|
||||
globalReplicationPool.resyncer.RUnlock()
|
||||
if !ok {
|
||||
brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
|
||||
Bucket: bucket,
|
||||
Err: fmt.Errorf("No replication resync status available for %s", arn),
|
||||
}), r.URL)
|
||||
return
|
||||
}
|
||||
tgtStats = brs.cloneTgtStats()
|
||||
brs, err := loadBucketResyncMetadata(ctx, bucket, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
|
||||
Bucket: bucket,
|
||||
Err: fmt.Errorf("replication resync status not available for %s (%s)", arn, err.Error()),
|
||||
}), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var rinfo ResyncTargetsInfo
|
||||
for tarn, st := range tgtStats {
|
||||
for tarn, st := range brs.TargetsMap {
|
||||
if arn != "" && tarn != arn {
|
||||
continue
|
||||
}
|
||||
@@ -585,7 +575,7 @@ func (api objectAPIHandlers) ValidateBucketReplicationCredsHandler(w http.Respon
|
||||
if rule.Status == replication.Disabled {
|
||||
continue
|
||||
}
|
||||
clnt := globalBucketTargetSys.GetRemoteTargetClient(rule.Destination.Bucket)
|
||||
clnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, rule.Destination.Bucket)
|
||||
if clnt == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetNotFoundError, fmt.Errorf("replication config with rule ID %s has a stale target", rule.ID)), r.URL)
|
||||
return
|
||||
|
||||
@@ -31,8 +31,8 @@ import (
|
||||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/bucket/replication"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
)
|
||||
|
||||
//go:generate msgp -file=$GOFILE
|
||||
@@ -167,7 +167,21 @@ func (ri replicatedInfos) Action() replicationAction {
|
||||
var replStatusRegex = regexp.MustCompile(`([^=].*?)=([^,].*?);`)
|
||||
|
||||
// TargetReplicationStatus - returns replication status of a target
|
||||
func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.StatusType) {
|
||||
func (ri ReplicateObjectInfo) TargetReplicationStatus(arn string) (status replication.StatusType) {
|
||||
repStatMatches := replStatusRegex.FindAllStringSubmatch(ri.ReplicationStatusInternal, -1)
|
||||
for _, repStatMatch := range repStatMatches {
|
||||
if len(repStatMatch) != 3 {
|
||||
return
|
||||
}
|
||||
if repStatMatch[1] == arn {
|
||||
return replication.StatusType(repStatMatch[2])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// TargetReplicationStatus - returns replication status of a target
|
||||
func (o ObjectInfo) TargetReplicationStatus(arn string) (status replication.StatusType) {
|
||||
repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1)
|
||||
for _, repStatMatch := range repStatMatches {
|
||||
if len(repStatMatch) != 3 {
|
||||
@@ -185,7 +199,6 @@ type replicateTargetDecision struct {
|
||||
Synchronous bool // Synchronous replication configured.
|
||||
Arn string // ARN of replication target
|
||||
ID string
|
||||
Tgt *TargetClient
|
||||
}
|
||||
|
||||
func (t *replicateTargetDecision) String() string {
|
||||
@@ -207,7 +220,7 @@ type ReplicateDecision struct {
|
||||
}
|
||||
|
||||
// ReplicateAny returns true if atleast one target qualifies for replication
|
||||
func (d *ReplicateDecision) ReplicateAny() bool {
|
||||
func (d ReplicateDecision) ReplicateAny() bool {
|
||||
for _, t := range d.targetsMap {
|
||||
if t.Replicate {
|
||||
return true
|
||||
@@ -217,7 +230,7 @@ func (d *ReplicateDecision) ReplicateAny() bool {
|
||||
}
|
||||
|
||||
// Synchronous returns true if atleast one target qualifies for synchronous replication
|
||||
func (d *ReplicateDecision) Synchronous() bool {
|
||||
func (d ReplicateDecision) Synchronous() bool {
|
||||
for _, t := range d.targetsMap {
|
||||
if t.Synchronous {
|
||||
return true
|
||||
@@ -226,7 +239,7 @@ func (d *ReplicateDecision) Synchronous() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *ReplicateDecision) String() string {
|
||||
func (d ReplicateDecision) String() string {
|
||||
b := new(bytes.Buffer)
|
||||
for key, value := range d.targetsMap {
|
||||
fmt.Fprintf(b, "%s=%s,", key, value.String())
|
||||
@@ -243,7 +256,7 @@ func (d *ReplicateDecision) Set(t replicateTargetDecision) {
|
||||
}
|
||||
|
||||
// PendingStatus returns a stringified representation of internal replication status with all targets marked as `PENDING`
|
||||
func (d *ReplicateDecision) PendingStatus() string {
|
||||
func (d ReplicateDecision) PendingStatus() string {
|
||||
b := new(bytes.Buffer)
|
||||
for _, k := range d.targetsMap {
|
||||
if k.Replicate {
|
||||
@@ -259,11 +272,11 @@ type ResyncDecision struct {
|
||||
}
|
||||
|
||||
// Empty returns true if no targets with resync decision present
|
||||
func (r *ResyncDecision) Empty() bool {
|
||||
func (r ResyncDecision) Empty() bool {
|
||||
return r.targets == nil
|
||||
}
|
||||
|
||||
func (r *ResyncDecision) mustResync() bool {
|
||||
func (r ResyncDecision) mustResync() bool {
|
||||
for _, v := range r.targets {
|
||||
if v.Replicate {
|
||||
return true
|
||||
@@ -272,15 +285,12 @@ func (r *ResyncDecision) mustResync() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *ResyncDecision) mustResyncTarget(tgtArn string) bool {
|
||||
func (r ResyncDecision) mustResyncTarget(tgtArn string) bool {
|
||||
if r.targets == nil {
|
||||
return false
|
||||
}
|
||||
v, ok := r.targets[tgtArn]
|
||||
if ok && v.Replicate {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return ok && v.Replicate
|
||||
}
|
||||
|
||||
// ResyncTargetDecision is struct that represents resync decision for this target
|
||||
@@ -301,35 +311,20 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
|
||||
if len(s) == 0 {
|
||||
return
|
||||
}
|
||||
pairs := strings.Split(s, ",")
|
||||
for _, p := range pairs {
|
||||
for _, p := range strings.Split(s, ",") {
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
slc := strings.Split(p, "=")
|
||||
if len(slc) != 2 {
|
||||
return r, errInvalidReplicateDecisionFormat
|
||||
}
|
||||
tgtStr := strings.TrimPrefix(slc[1], "\"")
|
||||
tgtStr = strings.TrimSuffix(tgtStr, "\"")
|
||||
tgtStr := strings.TrimSuffix(strings.TrimPrefix(slc[1], `"`), `"`)
|
||||
tgt := strings.Split(tgtStr, ";")
|
||||
if len(tgt) != 4 {
|
||||
return r, errInvalidReplicateDecisionFormat
|
||||
}
|
||||
var replicate, sync bool
|
||||
var err error
|
||||
replicate, err = strconv.ParseBool(tgt[0])
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
sync, err = strconv.ParseBool(tgt[1])
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(slc[0])
|
||||
if tgtClnt == nil {
|
||||
// Skip stale targets if any and log them to be missing atleast once.
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, slc[0]), slc[0])
|
||||
// We save the targetDecision even when its not configured or stale.
|
||||
}
|
||||
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: replicate, Synchronous: sync, Arn: tgt[2], ID: tgt[3], Tgt: tgtClnt}
|
||||
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
|
||||
}
|
||||
return
|
||||
}
|
||||
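For reference, a standalone version of the string format parsed above: comma-separated key="replicate;sync;arn;id" pairs. The decision type and parseDecisions helper are hypothetical names for a runnable example; the parsing steps follow the hunk.

package main

import (
	"fmt"
	"strings"
)

// decision mirrors the fields extracted from each target entry.
type decision struct {
	Replicate, Synchronous bool
	Arn, ID                string
}

func parseDecisions(s string) (map[string]decision, error) {
	out := make(map[string]decision)
	for _, p := range strings.Split(s, ",") {
		if p == "" {
			continue
		}
		slc := strings.SplitN(p, "=", 2)
		if len(slc) != 2 {
			return nil, fmt.Errorf("invalid pair %q", p)
		}
		tgtStr := strings.TrimSuffix(strings.TrimPrefix(slc[1], `"`), `"`)
		tgt := strings.Split(tgtStr, ";")
		if len(tgt) != 4 {
			return nil, fmt.Errorf("invalid target %q", tgtStr)
		}
		out[slc[0]] = decision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
	}
	return out, nil
}

func main() {
	d, err := parseDecisions(`arn:minio:replication::1:dest="true;false;arn:minio:replication::1:dest;rule-1"`)
	fmt.Println(d, err)
}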
@@ -496,8 +491,8 @@ func getCompositeVersionPurgeStatus(m map[string]VersionPurgeStatusType) Version
|
||||
}
|
||||
|
||||
// getHealReplicateObjectInfo returns info needed by heal replication in ReplicateObjectInfo
|
||||
func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) ReplicateObjectInfo {
|
||||
oi := objInfo.Clone()
|
||||
func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) ReplicateObjectInfo {
|
||||
userDefined := cloneMSS(oi.UserDefined)
|
||||
if rcfg.Config != nil && rcfg.Config.RoleArn != "" {
|
||||
// For backward compatibility of objects pending/failed replication.
|
||||
// Save replication related statuses in the new internal representation for
|
||||
@@ -508,17 +503,15 @@ func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) Repl
|
||||
if !oi.VersionPurgeStatus.Empty() {
|
||||
oi.VersionPurgeStatusInternal = fmt.Sprintf("%s=%s;", rcfg.Config.RoleArn, oi.VersionPurgeStatus)
|
||||
}
|
||||
for k, v := range oi.UserDefined {
|
||||
for k, v := range userDefined {
|
||||
if strings.EqualFold(k, ReservedMetadataPrefixLower+ReplicationReset) {
|
||||
delete(oi.UserDefined, k)
|
||||
oi.UserDefined[targetResetHeader(rcfg.Config.RoleArn)] = v
|
||||
delete(userDefined, k)
|
||||
userDefined[targetResetHeader(rcfg.Config.RoleArn)] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
var dsc ReplicateDecision
|
||||
var tgtStatuses map[string]replication.StatusType
|
||||
var purgeStatuses map[string]VersionPurgeStatusType
|
||||
|
||||
var dsc ReplicateDecision
|
||||
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
|
||||
dsc = checkReplicateDelete(GlobalContext, oi.Bucket, ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
@@ -530,33 +523,45 @@ func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) Repl
|
||||
VersionSuspended: globalBucketVersioningSys.PrefixSuspended(oi.Bucket, oi.Name),
|
||||
}, nil)
|
||||
} else {
|
||||
dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(ObjectInfo{
|
||||
UserDefined: oi.UserDefined,
|
||||
}, replication.HealReplicationType, ObjectOptions{}))
|
||||
dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(userDefined, oi.UserTags, "", replication.HealReplicationType, ObjectOptions{}))
|
||||
}
|
||||
tgtStatuses = replicationStatusesMap(oi.ReplicationStatusInternal)
|
||||
purgeStatuses = versionPurgeStatusesMap(oi.VersionPurgeStatusInternal)
|
||||
existingObjResync := rcfg.Resync(GlobalContext, oi, &dsc, tgtStatuses)
|
||||
tm, _ := time.Parse(time.RFC3339Nano, oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp])
|
||||
|
||||
tgtStatuses := replicationStatusesMap(oi.ReplicationStatusInternal)
|
||||
purgeStatuses := versionPurgeStatusesMap(oi.VersionPurgeStatusInternal)
|
||||
existingObjResync := rcfg.Resync(GlobalContext, oi, dsc, tgtStatuses)
|
||||
tm, _ := time.Parse(time.RFC3339Nano, userDefined[ReservedMetadataPrefixLower+ReplicationTimestamp])
|
||||
rstate := oi.ReplicationState()
|
||||
rstate.ReplicateDecisionStr = dsc.String()
|
||||
asz, _ := oi.GetActualSize()
|
||||
|
||||
return ReplicateObjectInfo{
|
||||
ObjectInfo: oi,
|
||||
Name: oi.Name,
|
||||
Size: oi.Size,
|
||||
ActualSize: asz,
|
||||
Bucket: oi.Bucket,
|
||||
VersionID: oi.VersionID,
|
||||
ETag: oi.ETag,
|
||||
ModTime: oi.ModTime,
|
||||
ReplicationStatus: oi.ReplicationStatus,
|
||||
ReplicationStatusInternal: oi.ReplicationStatusInternal,
|
||||
DeleteMarker: oi.DeleteMarker,
|
||||
VersionPurgeStatusInternal: oi.VersionPurgeStatusInternal,
|
||||
VersionPurgeStatus: oi.VersionPurgeStatus,
|
||||
|
||||
ReplicationState: rstate,
|
||||
OpType: replication.HealReplicationType,
|
||||
Dsc: dsc,
|
||||
ExistingObjResync: existingObjResync,
|
||||
TargetStatuses: tgtStatuses,
|
||||
TargetPurgeStatuses: purgeStatuses,
|
||||
ReplicationTimestamp: tm,
|
||||
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
|
||||
UserTags: oi.UserTags,
|
||||
}
|
||||
}
|
||||
|
||||
func (ri *ReplicateObjectInfo) getReplicationState() ReplicationState {
|
||||
rs := ri.ObjectInfo.getReplicationState()
|
||||
rs.ReplicateDecisionStr = ri.Dsc.String()
|
||||
return rs
|
||||
}
|
||||
|
||||
// vID here represents the versionID client specified in request - need to distinguish between delete marker and delete marker deletion
|
||||
func (o *ObjectInfo) getReplicationState() ReplicationState {
|
||||
// ReplicationState - returns replication state using other internal replication metadata in ObjectInfo
|
||||
func (o ObjectInfo) ReplicationState() ReplicationState {
|
||||
rs := ReplicationState{
|
||||
ReplicationStatusInternal: o.ReplicationStatusInternal,
|
||||
VersionPurgeStatusInternal: o.VersionPurgeStatusInternal,
|
||||
@@ -577,7 +582,7 @@ func (o *ObjectInfo) getReplicationState() ReplicationState {
|
||||
}
|
||||
|
||||
// ReplicationState returns replication state using other internal replication metadata in ObjectToDelete
|
||||
func (o *ObjectToDelete) ReplicationState() ReplicationState {
|
||||
func (o ObjectToDelete) ReplicationState() ReplicationState {
|
||||
r := ReplicationState{
|
||||
ReplicationStatusInternal: o.DeleteMarkerReplicationStatus,
|
||||
VersionPurgeStatusInternal: o.VersionPurgeStatuses,
|
||||
|
||||
File diff suppressed because it is too large
@@ -88,7 +88,7 @@ var replicationConfigTests = []struct {
|
||||
func TestReplicationResync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
for i, test := range replicationConfigTests {
|
||||
if sync := test.rcfg.Resync(ctx, test.info, &test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
|
||||
if sync := test.rcfg.Resync(ctx, test.info, test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
|
||||
t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync.mustResync(), test.expectedSync)
|
||||
}
|
||||
}
|
||||
@@ -283,7 +283,7 @@ var (
|
||||
|
||||
func TestReplicationResyncwrapper(t *testing.T) {
|
||||
for i, test := range replicationConfigTests2 {
|
||||
if sync := test.rcfg.resync(test.info, &test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
|
||||
if sync := test.rcfg.resync(test.info, test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
|
||||
t.Errorf("%s (%s): Replicationresync got %t , want %t", fmt.Sprintf("Test%d - %s", i+1, time.Now().Format(http.TimeFormat)), test.name, sync.mustResync(), test.expectedSync)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,14 +40,29 @@ const (
|
||||
defaultHealthCheckReloadDuration = 30 * time.Minute
|
||||
)
|
||||
|
||||
type arnTarget struct {
|
||||
Client *TargetClient
|
||||
lastRefresh time.Time
|
||||
}
|
||||
|
||||
// arnErrs represents number of errors seen for a ARN and if update is in progress
|
||||
// to refresh remote targets from bucket metadata.
|
||||
type arnErrs struct {
|
||||
count int64
|
||||
updateInProgress bool
|
||||
bucket string
|
||||
}
|
||||
|
||||
// BucketTargetSys represents bucket targets subsystem
|
||||
type BucketTargetSys struct {
|
||||
sync.RWMutex
|
||||
arnRemotesMap map[string]*TargetClient
|
||||
arnRemotesMap map[string]arnTarget
|
||||
targetsMap map[string][]madmin.BucketTarget
|
||||
hMutex sync.RWMutex
|
||||
hc map[string]epHealth
|
||||
hcClient *madmin.AnonymousClient
|
||||
aMutex sync.RWMutex
|
||||
arnErrsMap map[string]arnErrs // map of ARN to error count of failures to get target
|
||||
}
|
||||
|
||||
type latencyStat struct {
|
||||
@@ -364,7 +379,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
|
||||
}
|
||||
|
||||
sys.targetsMap[bucket] = newtgts
|
||||
sys.arnRemotesMap[tgt.Arn] = clnt
|
||||
sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: clnt}
|
||||
sys.updateBandwidthLimit(bucket, tgt.Arn, tgt.BandwidthLimit)
|
||||
return nil
|
||||
}
|
||||
@@ -432,11 +447,71 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sys *BucketTargetSys) markRefreshInProgress(bucket, arn string) {
|
||||
sys.aMutex.Lock()
|
||||
defer sys.aMutex.Unlock()
|
||||
if v, ok := sys.arnErrsMap[arn]; !ok {
|
||||
sys.arnErrsMap[arn] = arnErrs{
|
||||
updateInProgress: true,
|
||||
count: v.count + 1,
|
||||
bucket: bucket,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sys *BucketTargetSys) markRefreshDone(bucket, arn string) {
|
||||
sys.aMutex.Lock()
|
||||
defer sys.aMutex.Unlock()
|
||||
if v, ok := sys.arnErrsMap[arn]; ok {
|
||||
sys.arnErrsMap[arn] = arnErrs{
|
||||
updateInProgress: false,
|
||||
count: v.count,
|
||||
bucket: bucket,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sys *BucketTargetSys) isReloadingTarget(bucket, arn string) bool {
|
||||
sys.aMutex.RLock()
|
||||
defer sys.aMutex.RUnlock()
|
||||
if v, ok := sys.arnErrsMap[arn]; ok {
|
||||
return v.updateInProgress
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (sys *BucketTargetSys) incTargetErr(bucket, arn string) {
|
||||
sys.aMutex.Lock()
|
||||
defer sys.aMutex.Unlock()
|
||||
if v, ok := sys.arnErrsMap[arn]; ok {
|
||||
sys.arnErrsMap[arn] = arnErrs{
|
||||
updateInProgress: v.updateInProgress,
|
||||
count: v.count + 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetRemoteTargetClient returns minio-go client for replication target instance
|
||||
func (sys *BucketTargetSys) GetRemoteTargetClient(arn string) *TargetClient {
|
||||
func (sys *BucketTargetSys) GetRemoteTargetClient(bucket, arn string) *TargetClient {
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
return sys.arnRemotesMap[arn]
|
||||
tgt := sys.arnRemotesMap[arn]
|
||||
sys.RUnlock()
|
||||
|
||||
if tgt.Client != nil {
|
||||
return tgt.Client
|
||||
}
|
||||
defer func() { // lazy refresh remote targets
|
||||
if tgt.Client == nil && !sys.isReloadingTarget(bucket, arn) && (tgt.lastRefresh.Equal(timeSentinel) || tgt.lastRefresh.Before(UTCNow().Add(-5*time.Minute))) {
|
||||
tgts, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
|
||||
if err == nil {
|
||||
sys.markRefreshInProgress(bucket, arn)
|
||||
sys.UpdateAllTargets(bucket, tgts)
|
||||
sys.markRefreshDone(bucket, arn)
|
||||
}
|
||||
}
|
||||
sys.incTargetErr(bucket, arn)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
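A generic sketch of the lazy-refresh idea in GetRemoteTargetClient above: return the cached client when present; otherwise trigger at most one background reload per target, rate-limited by the time of the last refresh. The targetCache type below is illustrative, using strings in place of *TargetClient.

package main

import (
	"fmt"
	"sync"
	"time"
)

// targetCache caches clients per ARN and refreshes misses in the background,
// guarded by an in-progress flag and a last-refresh timestamp.
type targetCache struct {
	mu          sync.Mutex
	clients     map[string]string // arn -> "client" (stand-in for a real client)
	lastRefresh map[string]time.Time
	refreshing  map[string]bool
}

func (c *targetCache) get(arn string, reload func() string) string {
	c.mu.Lock()
	if cl, ok := c.clients[arn]; ok {
		c.mu.Unlock()
		return cl
	}
	stale := time.Since(c.lastRefresh[arn]) > 5*time.Minute
	busy := c.refreshing[arn]
	if stale && !busy {
		c.refreshing[arn] = true
		c.mu.Unlock()
		go func() { // refresh outside the hot path
			cl := reload()
			c.mu.Lock()
			c.clients[arn] = cl
			c.lastRefresh[arn] = time.Now()
			c.refreshing[arn] = false
			c.mu.Unlock()
		}()
		return "" // caller sees a miss this time around
	}
	c.mu.Unlock()
	return ""
}

func main() {
	c := &targetCache{clients: map[string]string{}, lastRefresh: map[string]time.Time{}, refreshing: map[string]bool{}}
	reload := func() string { return "client-1" }
	fmt.Println(c.get("arn-1", reload) == "") // miss: refresh scheduled in the background
	time.Sleep(10 * time.Millisecond)
	fmt.Println(c.get("arn-1", reload)) // hit once the refresh has completed
}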
// GetRemoteBucketTargetByArn returns BucketTarget for a ARN
|
||||
@@ -457,8 +532,9 @@ func (sys *BucketTargetSys) GetRemoteBucketTargetByArn(ctx context.Context, buck
|
||||
// NewBucketTargetSys - creates new replication system.
|
||||
func NewBucketTargetSys(ctx context.Context) *BucketTargetSys {
|
||||
sys := &BucketTargetSys{
|
||||
arnRemotesMap: make(map[string]*TargetClient),
|
||||
arnRemotesMap: make(map[string]arnTarget),
|
||||
targetsMap: make(map[string][]madmin.BucketTarget),
|
||||
arnErrsMap: make(map[string]arnErrs),
|
||||
hc: make(map[string]epHealth),
|
||||
hcClient: newHCClient(),
|
||||
}
|
||||
@@ -502,7 +578,10 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
sys.arnRemotesMap[tgt.Arn] = tgtClient
|
||||
sys.arnRemotesMap[tgt.Arn] = arnTarget{
|
||||
Client: tgtClient,
|
||||
lastRefresh: UTCNow(),
|
||||
}
|
||||
sys.updateBandwidthLimit(bucket, tgt.Arn, tgt.BandwidthLimit)
|
||||
}
|
||||
|
||||
@@ -526,7 +605,7 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
continue
|
||||
}
|
||||
sys.arnRemotesMap[tgt.Arn] = tgtClient
|
||||
sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient}
|
||||
sys.updateBandwidthLimit(bucket.Name, tgt.Arn, tgt.BandwidthLimit)
|
||||
}
|
||||
sys.targetsMap[bucket.Name] = cfg.Targets
|
||||
|
||||
@@ -131,7 +131,7 @@ func performCallhome(ctx context.Context) {
|
||||
Version: madmin.HealthInfoVersion,
|
||||
Minio: madmin.MinioHealthInfo{
|
||||
Info: madmin.MinioInfo{
|
||||
DeploymentID: globalDeploymentID,
|
||||
DeploymentID: globalDeploymentID(),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -157,15 +157,11 @@ func performCallhome(ctx context.Context) {
|
||||
}
|
||||
|
||||
const (
healthURL = "https://subnet.min.io/api/health/upload"
healthURLDev = "http://localhost:9000/api/health/upload"
subnetHealthPath = "/api/health/upload"
)

func sendHealthInfo(ctx context.Context, healthInfo madmin.HealthInfo) error {
url := healthURL
if globalIsCICD {
url = healthURLDev
}
url := globalSubnetConfig.BaseURL + subnetHealthPath

filename := fmt.Sprintf("health_%s.json.gz", UTCNow().Format("20060102150405"))
url += "?filename=" + filename

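A small illustration of the change above: instead of hard-coding the SUBNET endpoint with a localhost variant for CI, the base URL comes from configuration and only the fixed path and a timestamped filename are appended. The helper name below is hypothetical.

package main

import (
	"fmt"
	"time"
)

// healthUploadURL builds the upload URL from a configurable base URL.
func healthUploadURL(baseURL string) string {
	filename := fmt.Sprintf("health_%s.json.gz", time.Now().UTC().Format("20060102150405"))
	return baseURL + "/api/health/upload?filename=" + filename
}

func main() {
	fmt.Println(healthUploadURL("https://subnet.min.io"))
}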
@@ -29,7 +29,6 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
@@ -54,7 +53,6 @@ import (
|
||||
"github.com/minio/kes-go"
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/color"
|
||||
@@ -71,10 +69,7 @@ import (
|
||||
// serverDebugLog will enable debug printing
|
||||
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
|
||||
|
||||
var (
|
||||
shardDiskTimeDelta time.Duration
|
||||
defaultAWSCredProvider []credentials.Provider
|
||||
)
|
||||
var shardDiskTimeDelta time.Duration
|
||||
|
||||
func init() {
|
||||
if runtime.GOOS == "windows" {
|
||||
@@ -112,30 +107,23 @@ func init() {
|
||||
gob.Register(madmin.XFSErrorConfigs{})
|
||||
gob.Register(map[string]interface{}{})
|
||||
|
||||
defaultAWSCredProvider = []credentials.Provider{
|
||||
&credentials.IAM{
|
||||
Client: &http.Client{
|
||||
Transport: NewHTTPTransport(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
shardDiskTimeDelta, err = time.ParseDuration(env.Get("_MINIO_SHARD_DISKTIME_DELTA", "1m"))
|
||||
if err != nil {
|
||||
shardDiskTimeDelta = 1 * time.Minute
|
||||
}
|
||||
|
||||
// All minio-go API operations shall be performed only once,
|
||||
// All minio-go and madmin-go API operations shall be performed only once,
|
||||
// another way to look at this is we are turning off retries.
|
||||
minio.MaxRetry = 1
|
||||
madmin.MaxRetry = 1
|
||||
}
|
||||
|
||||
const consolePrefix = "CONSOLE_"
|
||||
|
||||
func minioConfigToConsoleFeatures() {
|
||||
os.Setenv("CONSOLE_PBKDF_SALT", globalDeploymentID)
|
||||
os.Setenv("CONSOLE_PBKDF_PASSPHRASE", globalDeploymentID)
|
||||
os.Setenv("CONSOLE_PBKDF_SALT", globalDeploymentID())
|
||||
os.Setenv("CONSOLE_PBKDF_PASSPHRASE", globalDeploymentID())
|
||||
if globalMinioEndpoint != "" {
|
||||
os.Setenv("CONSOLE_MINIO_SERVER", globalMinioEndpoint)
|
||||
} else {
|
||||
@@ -143,29 +131,33 @@ func minioConfigToConsoleFeatures() {
|
||||
// This will save users from providing a certificate with IP or FQDN SAN that points to the local host.
|
||||
os.Setenv("CONSOLE_MINIO_SERVER", fmt.Sprintf("%s://127.0.0.1:%s", getURLScheme(globalIsTLS), globalMinioPort))
|
||||
}
|
||||
if value := env.Get("MINIO_LOG_QUERY_URL", ""); value != "" {
|
||||
if value := env.Get(config.EnvMinIOLogQueryURL, ""); value != "" {
|
||||
os.Setenv("CONSOLE_LOG_QUERY_URL", value)
|
||||
if value := env.Get("MINIO_LOG_QUERY_AUTH_TOKEN", ""); value != "" {
|
||||
if value := env.Get(config.EnvMinIOLogQueryAuthToken, ""); value != "" {
|
||||
os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
|
||||
}
|
||||
}
|
||||
// pass the console subpath configuration
|
||||
if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" {
|
||||
if globalBrowserRedirectURL != nil {
|
||||
subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
|
||||
if subPath != SlashSeparator {
|
||||
os.Setenv("CONSOLE_SUBPATH", subPath)
|
||||
}
|
||||
}
|
||||
// Enable if prometheus URL is set.
|
||||
if value := env.Get("MINIO_PROMETHEUS_URL", ""); value != "" {
|
||||
if value := env.Get(config.EnvMinIOPrometheusURL, ""); value != "" {
|
||||
os.Setenv("CONSOLE_PROMETHEUS_URL", value)
|
||||
if value := env.Get("MINIO_PROMETHEUS_JOB_ID", "minio-job"); value != "" {
|
||||
if value := env.Get(config.EnvMinIOPrometheusJobID, "minio-job"); value != "" {
|
||||
os.Setenv("CONSOLE_PROMETHEUS_JOB_ID", value)
|
||||
// Support additional labels for more granular filtering.
|
||||
if value := env.Get("MINIO_PROMETHEUS_EXTRA_LABELS", ""); value != "" {
|
||||
if value := env.Get(config.EnvMinIOPrometheusExtraLabels, ""); value != "" {
|
||||
os.Setenv("CONSOLE_PROMETHEUS_EXTRA_LABELS", value)
|
||||
}
|
||||
}
|
||||
// Support Prometheus Auth Token
|
||||
if value := env.Get(config.EnvMinIOPrometheusAuthToken, ""); value != "" {
|
||||
os.Setenv("CONSOLE_PROMETHEUS_AUTH_TOKEN", value)
|
||||
}
|
||||
}
|
||||
// Enable if LDAP is enabled.
|
||||
if globalIAMSys.LDAPConfig.Enabled() {
|
||||
@@ -175,9 +167,12 @@ func minioConfigToConsoleFeatures() {
|
||||
if value := env.Get(config.EnvBrowserLoginAnimation, "on"); value != "" {
|
||||
os.Setenv("CONSOLE_ANIMATED_LOGIN", value)
|
||||
}
|
||||
|
||||
// Pass on the session duration environment variable, else we will default to 12 hours
|
||||
if value := env.Get(config.EnvBrowserSessionDuration, ""); value != "" {
|
||||
os.Setenv("CONSOLE_STS_DURATION", value)
|
||||
if valueSts := env.Get(config.EnvMinioStsDuration, ""); valueSts != "" {
|
||||
os.Setenv("CONSOLE_STS_DURATION", valueSts)
|
||||
} else if valueSession := env.Get(config.EnvBrowserSessionDuration, ""); valueSession != "" {
|
||||
os.Setenv("CONSOLE_STS_DURATION", valueSession)
|
||||
}
|
||||
|
||||
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
|
||||
@@ -199,7 +194,7 @@ func buildOpenIDConsoleConfig() consoleoauth2.OpenIDPCfg {
|
||||
DisplayName: cfg.DisplayName,
|
||||
ClientID: cfg.ClientID,
|
||||
ClientSecret: cfg.ClientSecret,
|
||||
HMACSalt: globalDeploymentID,
|
||||
HMACSalt: globalDeploymentID(),
|
||||
HMACPassphrase: cfg.ClientID,
|
||||
Scopes: strings.Join(cfg.DiscoveryDoc.ScopesSupported, ","),
|
||||
Userinfo: cfg.ClaimUserinfo,
|
||||
@@ -391,7 +386,7 @@ func handleCommonCmdArgs(ctx *cli.Context) {
|
||||
if consoleAddr == "" {
|
||||
p, err := xnet.GetFreePort()
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to get free port for console on the host")
|
||||
logger.FatalIf(err, "Unable to get free port for Console UI on the host")
|
||||
}
|
||||
consoleAddr = net.JoinHostPort("", p.String())
|
||||
}
|
||||
@@ -405,6 +400,15 @@ func handleCommonCmdArgs(ctx *cli.Context) {
|
||||
}
|
||||
|
||||
globalMinioHost, globalMinioPort = mustSplitHostPort(addr)
|
||||
if globalMinioPort == "0" {
|
||||
p, err := xnet.GetFreePort()
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to get free port for S3 API on the host")
|
||||
}
|
||||
globalMinioPort = p.String()
|
||||
globalDynamicAPIPort = true
|
||||
}
|
||||
|
||||
globalMinioConsoleHost, globalMinioConsolePort = mustSplitHostPort(consoleAddr)
|
||||
|
||||
if globalMinioPort == globalMinioConsolePort {
|
||||
@@ -618,8 +622,6 @@ func loadEnvVarsFromFiles() {
|
||||
}
|
||||
|
||||
func handleCommonEnvVars() {
|
||||
loadEnvVarsFromFiles()
|
||||
|
||||
var err error
|
||||
globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
|
||||
if err != nil {
|
||||
@@ -640,6 +642,7 @@ func handleCommonEnvVars() {
|
||||
}
|
||||
globalBrowserRedirectURL = u
|
||||
}
|
||||
globalBrowserRedirect = env.Get(config.EnvBrowserRedirect, config.EnableOn) == config.EnableOn
|
||||
}
|
||||
|
||||
if serverURL := env.Get(config.EnvMinIOServerURL, ""); serverURL != "" {
|
||||
@@ -664,10 +667,14 @@ func handleCommonEnvVars() {
|
||||
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
|
||||
}
|
||||
|
||||
if rootDiskSize := env.Get(config.EnvRootDiskThresholdSize, ""); rootDiskSize != "" {
rootDiskSize := env.Get(config.EnvRootDriveThresholdSize, "")
if rootDiskSize == "" {
rootDiskSize = env.Get(config.EnvRootDiskThresholdSize, "")
}
if rootDiskSize != "" {
size, err := humanize.ParseBytes(rootDiskSize)
if err != nil {
logger.Fatal(err, fmt.Sprintf("Invalid %s value in environment variable", config.EnvRootDiskThresholdSize))
logger.Fatal(err, fmt.Sprintf("Invalid %s value in root drive threshold environment variable", rootDiskSize))
}
globalRootDiskThreshold = size
}

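A minimal sketch of the fallback above: prefer the newer "root drive" variable, fall back to the older "root disk" spelling, then parse a human-readable size with go-humanize. The literal environment variable names used here are assumptions standing in for the config constants in the hunk.

package main

import (
	"fmt"
	"os"

	"github.com/dustin/go-humanize"
)

// rootDriveThreshold reads the threshold from either variable and parses
// values such as "512MiB"; an empty result means the feature is disabled.
func rootDriveThreshold() (uint64, error) {
	v := os.Getenv("MINIO_ROOTDRIVE_THRESHOLD_SIZE") // assumed name
	if v == "" {
		v = os.Getenv("MINIO_ROOTDISK_THRESHOLD_SIZE") // assumed legacy name
	}
	if v == "" {
		return 0, nil
	}
	return humanize.ParseBytes(v)
}

func main() {
	os.Setenv("MINIO_ROOTDISK_THRESHOLD_SIZE", "512MiB")
	size, err := rootDriveThreshold()
	fmt.Println(size, err)
}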
@@ -802,6 +809,9 @@ func handleKMSConfig() {
|
||||
logger.Fatal(errors.New("ambigious KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
|
||||
}
|
||||
}
|
||||
if !env.IsSet(kms.EnvKESKeyName) {
|
||||
logger.Fatal(errors.New("Invalid KES configuration"), fmt.Sprintf("The mandatory environment variable %q not set", kms.EnvKESKeyName))
|
||||
}
|
||||
|
||||
var endpoints []string
|
||||
for _, endpoint := range strings.Split(env.Get(kms.EnvKESEndpoint, ""), ",") {
|
||||
|
||||
@@ -71,7 +71,7 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string)
|
||||
}
|
||||
|
||||
func saveConfigWithOpts(ctx context.Context, store objectIO, configFile string, data []byte, opts ObjectOptions) error {
|
||||
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
|
||||
hashReader, err := hash.NewReader(ctx, bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -27,10 +27,12 @@ import (
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/config/api"
|
||||
"github.com/minio/minio/internal/config/batch"
|
||||
"github.com/minio/minio/internal/config/cache"
|
||||
"github.com/minio/minio/internal/config/callhome"
|
||||
"github.com/minio/minio/internal/config/compress"
|
||||
"github.com/minio/minio/internal/config/dns"
|
||||
"github.com/minio/minio/internal/config/drive"
|
||||
"github.com/minio/minio/internal/config/etcd"
|
||||
"github.com/minio/minio/internal/config/heal"
|
||||
xldap "github.com/minio/minio/internal/config/identity/ldap"
|
||||
@@ -46,7 +48,6 @@ import (
|
||||
"github.com/minio/minio/internal/config/subnet"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
)
|
||||
@@ -54,7 +55,6 @@ import (
|
||||
func initHelp() {
|
||||
kvs := map[string]config.KVS{
|
||||
config.EtcdSubSys: etcd.DefaultKVS,
|
||||
config.CacheSubSys: cache.DefaultKVS,
|
||||
config.CompressionSubSys: compress.DefaultKVS,
|
||||
config.IdentityLDAPSubSys: xldap.DefaultKVS,
|
||||
config.IdentityOpenIDSubSys: openid.DefaultKVS,
|
||||
@@ -71,6 +71,9 @@ func initHelp() {
|
||||
config.ScannerSubSys: scanner.DefaultKVS,
|
||||
config.SubnetSubSys: subnet.DefaultKVS,
|
||||
config.CallhomeSubSys: callhome.DefaultKVS,
|
||||
config.DriveSubSys: drive.DefaultKVS,
|
||||
config.CacheSubSys: cache.DefaultKVS,
|
||||
config.BatchSubSys: batch.DefaultKVS,
|
||||
}
|
||||
for k, v := range notify.DefaultNotificationKVS {
|
||||
kvs[k] = v
|
||||
@@ -98,6 +101,10 @@ func initHelp() {
|
||||
Description: "enable callhome to MinIO SUBNET",
|
||||
Optional: true,
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.DriveSubSys,
|
||||
Description: "enable drive specific settings",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.SiteSubSys,
|
||||
Description: "label the server and its location",
|
||||
@@ -110,6 +117,10 @@ func initHelp() {
|
||||
Key: config.ScannerSubSys,
|
||||
Description: "manage namespace scanning for usage calculation, lifecycle, healing and more",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.BatchSubSys,
|
||||
Description: "manage batch job workers and wait times",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.CompressionSubSys,
|
||||
Description: "enable server side compression of objects",
|
||||
@@ -211,7 +222,9 @@ func initHelp() {
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.CacheSubSys,
|
||||
Description: "[DEPRECATED] add caching storage tier",
|
||||
Type: "string",
|
||||
Description: "enable various cache optimizations on MinIO for reads",
|
||||
Optional: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -232,9 +245,9 @@ func initHelp() {
|
||||
config.APISubSys: api.Help,
|
||||
config.StorageClassSubSys: storageclass.Help,
|
||||
config.EtcdSubSys: etcd.Help,
|
||||
config.CacheSubSys: cache.Help,
|
||||
config.CompressionSubSys: compress.Help,
|
||||
config.HealSubSys: heal.Help,
|
||||
config.BatchSubSys: batch.Help,
|
||||
config.ScannerSubSys: scanner.Help,
|
||||
config.IdentityOpenIDSubSys: openid.Help,
|
||||
config.IdentityLDAPSubSys: xldap.Help,
|
||||
@@ -258,6 +271,8 @@ func initHelp() {
|
||||
config.LambdaWebhookSubSys: lambda.HelpWebhook,
|
||||
config.SubnetSubSys: subnet.HelpSubnet,
|
||||
config.CallhomeSubSys: callhome.HelpCallhome,
|
||||
config.DriveSubSys: drive.HelpDrive,
|
||||
config.CacheSubSys: cache.Help,
|
||||
}
|
||||
|
||||
config.RegisterHelpSubSys(helpMap)
|
||||
@@ -293,6 +308,10 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}
case config.BatchSubSys:
if _, err := batch.LookupConfig(s[config.BatchSubSys][config.Default]); err != nil {
return err
}
case config.StorageClassSubSys:
if objAPI == nil {
return errServerNotInitialized
@@ -302,10 +321,6 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
return err
}
}
case config.CacheSubSys:
if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
return err
}
case config.CompressionSubSys:
if _, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]); err != nil {
return err
@@ -369,6 +384,14 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
if cfg.Enabled() && !globalSubnetConfig.Registered() {
return errors.New("Deployment is not registered with SUBNET. Please register the deployment via 'mc license register ALIAS'")
}
case config.DriveSubSys:
if _, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
return err
}
case config.CacheSubSys:
if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport); err != nil {
return err
}
case config.PolicyOPASubSys:
// In case legacy OPA config is being set, we treat it as if the
// AuthZPlugin is being set.
@@ -493,20 +516,6 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
}

globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup cache: %w", err))
}

if globalCacheConfig.Enabled {
if cacheEncKey := env.Get(cache.EnvCacheEncryptionKey, ""); cacheEncKey != "" {
globalCacheKMS, err = kms.Parse(cacheEncKey)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup encryption cache: %w", err))
}
}
}

globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled
if globalAutoEncryption && GlobalKMS == nil {
logger.Fatal(errors.New("no KMS configured"), "MINIO_KMS_AUTO_ENCRYPTION requires a valid KMS configuration")
@@ -566,6 +575,12 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
return fmt.Errorf("Unable to apply heal config: %w", err)
}
globalHealConfig.Update(healCfg)
case config.BatchSubSys:
batchCfg, err := batch.LookupConfig(s[config.BatchSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply batch config: %w", err)
}
globalBatchConfig.Update(batchCfg)
case config.ScannerSubSys:
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
if err != nil {
@@ -644,7 +659,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
} else {
globalSubnetConfig.Update(subnetConfig)
globalSubnetConfig.Update(subnetConfig, globalIsCICD)
globalSubnetConfig.ApplyEnv() // update environment settings for Console UI
}
case config.CallhomeSubSys:
@@ -658,6 +673,22 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
initCallhome(ctx, objAPI)
}
}
case config.DriveSubSys:
if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
} else {
err := globalDriveConfig.Update(driveConfig)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
}
}
case config.CacheSubSys:
cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
} else {
globalCacheConfig.Update(cacheCfg)
}
}
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()

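The new drive and cache cases above follow the usual dynamic-config pattern in this file: parse the stored key-values with the subsystem's LookupConfig and swap the parsed struct into a process-wide config object. A minimal sketch of that pattern, with a hypothetical "foo" subsystem and globalFooConfig standing in for the real names:

    // Hedged sketch of the dynamic-config pattern used above (hypothetical subsystem).
    fooCfg, err := foo.LookupConfig(s[config.FooSubSys][config.Default])
    if err != nil {
        logger.LogIf(ctx, fmt.Errorf("Unable to load foo config: %w", err))
    } else {
        globalFooConfig.Update(fooCfg) // Update is assumed to be concurrency-safe
    }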
File diff suppressed because it is too large
@@ -1,336 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"context"
"fmt"
"os"
"testing"
)

// Test if config v1 is purged
func TestServerConfigMigrateV1(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
err = newTestConfig(globalMinioDefaultRegion, objLayer)
if err != nil {
t.Fatalf("Init Test config failed")
}
rootPath := t.TempDir()
globalConfigDir = &ConfigDir{path: rootPath}

globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()

// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
configPath := rootPath + "/fsUsers.json"
if err := os.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Fire a migrateConfig()
if err := migrateConfig(); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Check if config v1 is removed from filesystem
if _, err := os.Stat(configPath); err == nil || !osIsNotExist(err) {
t.Fatal("Config V1 file is not purged")
}

// Initialize server config and check again if everything is fine
if err := loadConfig(objLayer, nil); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}

// Test if all migrate code returns nil when config file does not
// exist
func TestServerConfigMigrateInexistentConfig(t *testing.T) {
globalConfigDir = &ConfigDir{path: t.TempDir()}

if err := migrateV2ToV3(); err != nil {
t.Fatal("migrate v2 to v3 should succeed when no config file is found")
}
if err := migrateV3ToV4(); err != nil {
t.Fatal("migrate v3 to v4 should succeed when no config file is found")
}
if err := migrateV4ToV5(); err != nil {
t.Fatal("migrate v4 to v5 should succeed when no config file is found")
}
if err := migrateV5ToV6(); err != nil {
t.Fatal("migrate v5 to v6 should succeed when no config file is found")
}
if err := migrateV6ToV7(); err != nil {
t.Fatal("migrate v6 to v7 should succeed when no config file is found")
}
if err := migrateV7ToV8(); err != nil {
t.Fatal("migrate v7 to v8 should succeed when no config file is found")
}
if err := migrateV8ToV9(); err != nil {
t.Fatal("migrate v8 to v9 should succeed when no config file is found")
}
if err := migrateV9ToV10(); err != nil {
t.Fatal("migrate v9 to v10 should succeed when no config file is found")
}
if err := migrateV10ToV11(); err != nil {
t.Fatal("migrate v10 to v11 should succeed when no config file is found")
}
if err := migrateV11ToV12(); err != nil {
t.Fatal("migrate v11 to v12 should succeed when no config file is found")
}
if err := migrateV12ToV13(); err != nil {
t.Fatal("migrate v12 to v13 should succeed when no config file is found")
}
if err := migrateV13ToV14(); err != nil {
t.Fatal("migrate v13 to v14 should succeed when no config file is found")
}
if err := migrateV14ToV15(); err != nil {
t.Fatal("migrate v14 to v15 should succeed when no config file is found")
}
if err := migrateV15ToV16(); err != nil {
t.Fatal("migrate v15 to v16 should succeed when no config file is found")
}
if err := migrateV16ToV17(); err != nil {
t.Fatal("migrate v16 to v17 should succeed when no config file is found")
}
if err := migrateV17ToV18(); err != nil {
t.Fatal("migrate v17 to v18 should succeed when no config file is found")
}
if err := migrateV18ToV19(); err != nil {
t.Fatal("migrate v18 to v19 should succeed when no config file is found")
}
if err := migrateV19ToV20(); err != nil {
t.Fatal("migrate v19 to v20 should succeed when no config file is found")
}
if err := migrateV20ToV21(); err != nil {
t.Fatal("migrate v20 to v21 should succeed when no config file is found")
}
if err := migrateV21ToV22(); err != nil {
t.Fatal("migrate v21 to v22 should succeed when no config file is found")
}
if err := migrateV22ToV23(); err != nil {
t.Fatal("migrate v22 to v23 should succeed when no config file is found")
}
if err := migrateV23ToV24(); err != nil {
t.Fatal("migrate v23 to v24 should succeed when no config file is found")
}
if err := migrateV24ToV25(); err != nil {
t.Fatal("migrate v24 to v25 should succeed when no config file is found")
}
if err := migrateV25ToV26(); err != nil {
t.Fatal("migrate v25 to v26 should succeed when no config file is found")
}
if err := migrateV26ToV27(); err != nil {
t.Fatal("migrate v26 to v27 should succeed when no config file is found")
}
if err := migrateV27ToV28(); err != nil {
t.Fatal("migrate v27 to v28 should succeed when no config file is found")
}
}

// Test if a config migration from v2 to v33 is successfully done
func TestServerConfigMigrateV2toV33(t *testing.T) {
rootPath := t.TempDir()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

globalConfigDir = &ConfigDir{path: rootPath}

objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)

configPath := rootPath + SlashSeparator + minioConfigFile

// Create a corrupted config file
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
if err := migrateConfig(); err == nil {
t.Fatal("migration should fail with corrupted config file")
}

accessKey := "accessfoo"
secretKey := "secretfoo"

// Create a V2 config json file and store it
configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
if err := os.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Fire a migrateConfig()
if err := migrateConfig(); err != nil {
t.Fatal("Unexpected error: ", err)
}

if err := migrateConfigToMinioSys(objLayer); err != nil {
t.Fatal("Unexpected error: ", err)
}

srvCfg, err := readConfigWithoutMigrate(context.Background(), objLayer)
if err != nil {
t.Fatal("Unexpected error: ", err)
}

if err = saveServerConfig(GlobalContext, objLayer, srvCfg); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Initialize server config and check again if everything is fine
if err := loadConfig(objLayer, nil); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}

// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateFaultyConfig(t *testing.T) {
rootPath := t.TempDir()

globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + SlashSeparator + minioConfigFile

// Create a corrupted config file
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Test different migrate versions and be sure they are returning an error
if err := migrateV2ToV3(); err == nil {
t.Fatal("migrateConfigV2ToV3() should fail with a corrupted json")
}
if err := migrateV3ToV4(); err == nil {
t.Fatal("migrateConfigV3ToV4() should fail with a corrupted json")
}
if err := migrateV4ToV5(); err == nil {
t.Fatal("migrateConfigV4ToV5() should fail with a corrupted json")
}
if err := migrateV5ToV6(); err == nil {
t.Fatal("migrateConfigV5ToV6() should fail with a corrupted json")
}
if err := migrateV6ToV7(); err == nil {
t.Fatal("migrateConfigV6ToV7() should fail with a corrupted json")
}
if err := migrateV7ToV8(); err == nil {
t.Fatal("migrateConfigV7ToV8() should fail with a corrupted json")
}
if err := migrateV8ToV9(); err == nil {
t.Fatal("migrateConfigV8ToV9() should fail with a corrupted json")
}
if err := migrateV9ToV10(); err == nil {
t.Fatal("migrateConfigV9ToV10() should fail with a corrupted json")
}
if err := migrateV10ToV11(); err == nil {
t.Fatal("migrateConfigV10ToV11() should fail with a corrupted json")
}
if err := migrateV11ToV12(); err == nil {
t.Fatal("migrateConfigV11ToV12() should fail with a corrupted json")
}
if err := migrateV12ToV13(); err == nil {
t.Fatal("migrateConfigV12ToV13() should fail with a corrupted json")
}
if err := migrateV13ToV14(); err == nil {
t.Fatal("migrateConfigV13ToV14() should fail with a corrupted json")
}
if err := migrateV14ToV15(); err == nil {
t.Fatal("migrateConfigV14ToV15() should fail with a corrupted json")
}
if err := migrateV15ToV16(); err == nil {
t.Fatal("migrateConfigV15ToV16() should fail with a corrupted json")
}
if err := migrateV16ToV17(); err == nil {
t.Fatal("migrateConfigV16ToV17() should fail with a corrupted json")
}
if err := migrateV17ToV18(); err == nil {
t.Fatal("migrateConfigV17ToV18() should fail with a corrupted json")
}
if err := migrateV18ToV19(); err == nil {
t.Fatal("migrateConfigV18ToV19() should fail with a corrupted json")
}
if err := migrateV19ToV20(); err == nil {
t.Fatal("migrateConfigV19ToV20() should fail with a corrupted json")
}
if err := migrateV20ToV21(); err == nil {
t.Fatal("migrateConfigV20ToV21() should fail with a corrupted json")
}
if err := migrateV21ToV22(); err == nil {
t.Fatal("migrateConfigV21ToV22() should fail with a corrupted json")
}
if err := migrateV22ToV23(); err == nil {
t.Fatal("migrateConfigV22ToV23() should fail with a corrupted json")
}
if err := migrateV23ToV24(); err == nil {
t.Fatal("migrateConfigV23ToV24() should fail with a corrupted json")
}
if err := migrateV24ToV25(); err == nil {
t.Fatal("migrateConfigV24ToV25() should fail with a corrupted json")
}
if err := migrateV25ToV26(); err == nil {
t.Fatal("migrateConfigV25ToV26() should fail with a corrupted json")
}
if err := migrateV26ToV27(); err == nil {
t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
}
if err := migrateV27ToV28(); err == nil {
t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
}
}

// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
rootPath := t.TempDir()

globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + SlashSeparator + minioConfigFile

for i := 3; i <= 17; i++ {
// Create a corrupted config file
if err := os.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Test different migrate versions and be sure they are returning an error
if err := migrateConfig(); err == nil {
t.Fatal("migrateConfig() should fail with a corrupted json")
}
}

// Create a corrupted config file for version '2'.
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Test different migrate versions and be sure they are returning an error
if err := migrateConfig(); err == nil {
t.Fatal("migrateConfig() should fail with a corrupted json")
}
}
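The deleted tests above exercised migrateConfig, which walks a legacy on-disk config through each versioned migration step in order until the current format is reached. A hedged sketch of that chaining idea, using only the per-version function names shown in the tests; the wrapper name migrateLegacyConfig is illustrative, not the project's actual function:

    // Illustrative only: chain per-version migrations until the latest format is reached.
    func migrateLegacyConfig() error {
        steps := []func() error{
            migrateV2ToV3,
            migrateV3ToV4,
            // ... one step per historical version ...
            migrateV27ToV28,
        }
        for _, step := range steps {
            if err := step(); err != nil {
                return err
            }
        }
        return nil
    }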
@@ -18,418 +18,18 @@
package cmd

import (
"sync"

"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/config/compress"
xldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/config/notify"
"github.com/minio/minio/internal/config/policy/opa"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/event/target"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/quick"
)

type configV1 struct {
Version string `json:"version"`
AccessKey string `json:"accessKeyId"`
SecretKey string `json:"secretAccessKey"`
}

type configV2 struct {
Version string `json:"version"`
Credentials struct {
AccessKey string `json:"accessKeyId"`
SecretKey string `json:"secretAccessKey"`
Region string `json:"region"`
} `json:"credentials"`
MongoLogger struct {
Addr string `json:"addr"`
DB string `json:"db"`
Collection string `json:"collection"`
} `json:"mongoLogger"`
SyslogLogger struct {
Network string `json:"network"`
Addr string `json:"addr"`
} `json:"syslogLogger"`
FileLogger struct {
Filename string `json:"filename"`
} `json:"fileLogger"`
}

// backendV3 type.
type backendV3 struct {
Type string `json:"type"`
Disk string `json:"disk,omitempty"`
Disks []string `json:"disks,omitempty"`
}

// syslogLogger v3
type syslogLoggerV3 struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
}

// loggerV3 type.
type loggerV3 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
}
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
}
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
// Add new loggers here.
}

// configV3 server configuration version '3'.
type configV3 struct {
Version string `json:"version"`

// Backend configuration.
Backend backendV3 `json:"backend"`

// http Server configuration.
Addr string `json:"address"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV3 `json:"logger"`
}

// logger type representing version '4' logger config.
type loggerV4 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
} `json:"console"`
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
} `json:"file"`
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
}

// configV4 server configuration version '4'.
type configV4 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV4 `json:"logger"`
}

// logger type representing version '5' logger config.
type loggerV5 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
} `json:"console"`
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
} `json:"file"`
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
AMQP struct {
Enable bool `json:"enable"`
Level string `json:"level"`
URL string `json:"url"`
Exchange string `json:"exchange"`
RoutingKey string `json:"routingKey"`
ExchangeType string `json:"exchangeType"`
Mandatory bool `json:"mandatory"`
Immediate bool `json:"immediate"`
Durable bool `json:"durable"`
Internal bool `json:"internal"`
NoWait bool `json:"noWait"`
AutoDeleted bool `json:"autoDeleted"`
} `json:"amqp"`
ElasticSearch struct {
Enable bool `json:"enable"`
Level string `json:"level"`
URL string `json:"url"`
Index string `json:"index"`
} `json:"elasticsearch"`
Redis struct {
Enable bool `json:"enable"`
Level string `json:"level"`
Addr string `json:"address"`
Password string `json:"password"`
Key string `json:"key"`
} `json:"redis"`
}

// configV5 server configuration version '5'.
type configV5 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV5 `json:"logger"`
}

// consoleLogger - default logger if not other logging is enabled.
type consoleLoggerV1 struct {
Enable bool `json:"enable"`
Level string `json:"level"`
}

type fileLoggerV1 struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
}

type loggerV6 struct {
Console consoleLoggerV1 `json:"console"`
File fileLoggerV1 `json:"file"`
Syslog syslogLoggerV3 `json:"syslog"`
}

// configV6 server configuration version '6'.
type configV6 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV6 `json:"logger"`

// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}

// Notifier represents collection of supported notification queues in version
// 1 without NATS streaming.
type notifierV1 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
NATS map[string]natsNotifyV1 `json:"nats"`
ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Redis map[string]target.RedisArgs `json:"redis"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
}

// Notifier represents collection of supported notification queues in version 2
// with NATS streaming but without webhook.
type notifierV2 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
NATS map[string]target.NATSArgs `json:"nats"`
ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Redis map[string]target.RedisArgs `json:"redis"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
}

// configV7 server configuration version '7'.
type serverConfigV7 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV6 `json:"logger"`

// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}

// serverConfigV8 server configuration version '8'. Adds NATS notify.Config
// configuration.
type serverConfigV8 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV6 `json:"logger"`

// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}

// serverConfigV9 server configuration version '9'. Adds PostgreSQL
// notify.Config configuration.
type serverConfigV9 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV6 `json:"logger"`

// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}

type loggerV7 struct {
sync.RWMutex
Console consoleLoggerV1 `json:"console"`
File fileLoggerV1 `json:"file"`
}

// serverConfigV10 server configuration version '10' which is like
// version '9' except it drops support of syslog config, and makes the
// RWMutex global (so it does not exist in this struct).
type serverConfigV10 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV7 `json:"logger"`

// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}

// natsNotifyV1 - structure was valid until config V 11
type natsNotifyV1 struct {
Enable bool `json:"enable"`
Address string `json:"address"`
Subject string `json:"subject"`
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
Secure bool `json:"secure"`
PingInterval int64 `json:"pingInterval"`
}

// serverConfigV11 server configuration version '11' which is like
// version '10' except it adds support for Kafka notifications.
type serverConfigV11 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV7 `json:"logger"`

// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}

// serverConfigV12 server configuration version '12' which is like
// version '11' except it adds support for NATS streaming notifications.
type serverConfigV12 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger loggerV7 `json:"logger"`

// Notification queue configuration.
Notify notifierV2 `json:"notify"`
}

type notifierV3 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
MQTT map[string]target.MQTTArgs `json:"mqtt"`
MySQL map[string]target.MySQLArgs `json:"mysql"`
NATS map[string]target.NATSArgs `json:"nats"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Redis map[string]target.RedisArgs `json:"redis"`
Webhook map[string]target.WebhookArgs `json:"webhook"`
}

// serverConfigV13 server configuration version '13' which is like
// version '12' except it adds support for webhook notification.
type serverConfigV13 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`

// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV14 server configuration version '14' which is like
// version '13' except it adds support of browser param.
type serverConfigV14 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`

// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV15 server configuration version '15' which is like
// version '14' except it adds mysql support
type serverConfigV15 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`

// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// FileLogger is introduced to workaround the dependency about logrus
type FileLogger struct {
Enable bool `json:"enable"`
@@ -441,282 +41,6 @@ type ConsoleLogger struct {
Enable bool `json:"enable"`
}

// Loggers struct is defined with FileLogger and ConsoleLogger
// although they are removed from logging logic. They are
// kept here just to workaround the dependency migration
// code/logic has on them.
type loggers struct {
sync.RWMutex
Console ConsoleLogger `json:"console"`
File FileLogger `json:"file"`
}

// serverConfigV16 server configuration version '16' which is like
// version '15' except it makes a change to logging configuration.
type serverConfigV16 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`

// Additional error logging configuration.
Logger *loggers `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV17 server configuration version '17' which is like
// version '16' except it adds support for "format" parameter in
// database event notification targets: PostgreSQL, MySQL, Redis and
// Elasticsearch.
type serverConfigV17 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`

// Additional error logging configuration.
Logger *loggers `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV18 server configuration version '18' which is like
// version '17' except it adds support for "deliveryMode" parameter in
// the AMQP notification target.
type serverConfigV18 struct {
sync.RWMutex
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`

// Additional error logging configuration.
Logger *loggers `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV19 server configuration version '19' which is like
// version '18' except it adds support for MQTT notifications.
type serverConfigV19 struct {
sync.RWMutex
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`

// Additional error logging configuration.
Logger *loggers `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV20 server configuration version '20' which is like
// version '19' except it adds support for VirtualHostDomain
type serverConfigV20 struct {
sync.RWMutex
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`

// Additional error logging configuration.
Logger *loggers `json:"logger"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV21 is just like version '20' without logger field
type serverConfigV21 struct {
sync.RWMutex
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`

// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}

// serverConfigV22 is just like version '21' with added support
// for StorageClass.
type serverConfigV22 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}

// serverConfigV23 is just like version '22' with addition of cache field.
type serverConfigV23 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}

// serverConfigV24 is just like version '23', we had to revert
// the changes which were made in 6fb06045028b7a57c37c60a612c8e50735279ab4
type serverConfigV24 struct {
Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}

// serverConfigV25 is just like version '24', stores additionally
// worm variable.
type serverConfigV25 struct {
quick.Config `json:"-"` // ignore interfaces

Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Worm config.BoolFlag `json:"worm"`
Domain string `json:"domain"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}

// serverConfigV26 is just like version '25', stores additionally
// cache max use value in 'cache.Config'.
type serverConfigV26 struct {
quick.Config `json:"-"` // ignore interfaces

Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Worm config.BoolFlag `json:"worm"`
Domain string `json:"domain"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}

// serverConfigV27 is just like version '26', stores additionally
// the logger field
type serverConfigV27 struct {
quick.Config `json:"-"` // ignore interfaces

Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Worm config.BoolFlag `json:"worm"`
Domain string `json:"domain"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`

// Logger configuration
Logger logger.Config `json:"logger"`
}

// serverConfigV28 is just like version '27', additionally
// storing KMS config
type serverConfigV28 struct {
quick.Config `json:"-"` // ignore interfaces

Version string `json:"version"`

// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm config.BoolFlag `json:"worm"`

// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notifierV3 `json:"notify"`

// Logger configuration
Logger logger.Config `json:"logger"`
}

// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in all notification targets.
type serverConfigV33 struct {
quick.Config `json:"-"` // ignore interfaces
@@ -731,9 +55,6 @@ type serverConfigV33 struct {
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`

// Cache configuration
Cache cache.Config `json:"cache"`

// Notification queue configuration.
Notify notify.Config `json:"notify"`

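The legacy structs above exist only so that old on-disk configs can still be decoded during migration. A common way to do that, sketched here with encoding/json only (this is not the project's actual loader, which relies on the quick package), is to peek at the "version" field first and then unmarshal into the matching struct; decodeLegacyConfig is a hypothetical name, and the "encoding/json" and "fmt" imports are assumed:

    // Illustrative sketch: detect the legacy config version before full decoding.
    func decodeLegacyConfig(data []byte) (any, error) {
        var hdr struct {
            Version string `json:"version"`
        }
        if err := json.Unmarshal(data, &hdr); err != nil {
            return nil, err
        }
        switch hdr.Version {
        case "1":
            cfg := &configV1{}
            return cfg, json.Unmarshal(data, cfg)
        case "2":
            cfg := &configV2{}
            return cfg, json.Unmarshal(data, cfg)
        default:
            return nil, fmt.Errorf("unsupported legacy config version %q", hdr.Version)
        }
    }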
@@ -66,10 +66,12 @@ const (
scannerMetricYield
scannerMetricCleanAbandoned
scannerMetricApplyNonCurrent
scannerMetricHealAbandonedVersion

// START Trace metrics:
scannerMetricStartTrace
scannerMetricScanObject // Scan object. All operations included.
scannerMetricHealAbandonedObject

// END realtime metrics:
scannerMetricLastRealtime

@@ -54,11 +54,10 @@ const (
dataScannerForceCompactAtFolders = 1_000_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.

healDeleteDangling = true
healFolderIncludeProb = 32 // Include a clean folder one in n cycles.
healObjectSelectProb = 512 // Overall probability of a file being scanned; one in n.
healDeleteDangling = true
healObjectSelectProb = 1024 // Overall probability of a file being scanned; one in n.

dataScannerExcessiveVersionsThreshold = 1000 // Issue a warning when a single object has more versions than this
dataScannerExcessiveVersionsThreshold = 100 // Issue a warning when a single object has more versions than this
dataScannerExcessiveFoldersThreshold = 50000 // Issue a warning when a folder has more subfolders than this in a *set*
)

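Reading the constants above together (this is a reading of the code, not a statement from the commit message): heal sampling is multiplicative. A clean folder is included for a partial heal scan roughly once every healFolderIncludeProb = 32 cycles, and within an included folder each object is picked with probability 1/healObjectSelectProb. With the change from 512 to 1024, an object in a clean folder is therefore heal-scanned on the order of once in 32 x 1024 = 32,768 cycles instead of once in 32 x 512 = 16,384, halving the background heal sampling rate.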
@@ -66,7 +65,7 @@ var (
globalHealConfig heal.Config

// Sleeper values are updated when config is loaded.
scannerSleeper = newDynamicSleeper(10, 10*time.Second, true)
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
)

@@ -274,7 +273,6 @@ type folderScanner struct {
// rarer if the bloom filter for the path is clean and no lifecycles are applied.
// Skipped leaves have their totals transferred from the previous cycle.
//
// A clean leaf will be included once every healFolderIncludeProb for partial heal scans.
// When selected there is a one in healObjectSelectProb that any object will be chosen for heal scan.
//
// Compaction happens when either:
@@ -345,7 +343,6 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca
// No useful information...
return cache, err
}

s.newCache.Info.LastUpdate = UTCNow()
s.newCache.Info.NextCycle = cache.Info.NextCycle
return s.newCache, nil
@@ -409,9 +406,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int

var existingFolders, newFolders []cachedFolder
var foundObjects bool
err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error {
err := readDirFn(pathJoin(f.root, folder.name), func(entName string, typ os.FileMode) error {
// Parse
entName = pathClean(path.Join(folder.name, entName))
entName = pathClean(pathJoin(folder.name, entName))
if entName == "" || entName == folder.name {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" no entity (%s,%s)\n", f.root, entName)
@@ -461,7 +458,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int

// Get file size, ignore errors.
item := scannerItem{
Path: path.Join(f.root, entName),
Path: pathJoin(f.root, entName),
Typ: typ,
bucket: bucket,
prefix: path.Dir(prefix),
@@ -480,7 +477,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
item.heal.enabled = item.heal.enabled && f.healObjectSelect > 0

sz, err := f.getSize(item)
if err != nil {
if err != nil && err != errIgnoreFileContrib {
wait() // wait to proceed to next entry.
if err != errSkipFile && f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" getSize \"%v/%v\" returned err: %v\n", bucket, item.objectPath(), err)
@@ -496,10 +493,12 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
// Object already accounted for, remove from heal map,
// simply because getSize() function already heals the
// object.
delete(abandonedChildren, path.Join(item.bucket, item.objectPath()))
delete(abandonedChildren, pathJoin(item.bucket, item.objectPath()))

into.addSizes(sz)
into.Objects++
if err != errIgnoreFileContrib {
into.addSizes(sz)
into.Objects++
}

wait() // wait to proceed to next entry.

@@ -691,6 +690,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
reportNotFound: true,
minDisks: f.disksQuorum,
agreed: func(entry metaCacheEntry) {
f.updateCurrentPath(entry.name)
if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name)
}
@@ -704,6 +704,13 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
// this object might be dangling.
entry, _ = entries.firstFound()
}
// wait timer per object.
wait := scannerSleeper.Timer(ctx)
defer wait()
f.updateCurrentPath(entry.name)
stopFn := globalScannerMetrics.log(scannerMetricHealAbandonedObject, f.root, entry.name)
custom := make(map[string]string)
defer stopFn(custom)

if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" resolved to: %v, dir: %v\n", entry.name, entry.isDir())
@@ -713,13 +720,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
return
}

// wait on timer per object.
wait := scannerSleeper.Timer(ctx)

// We got an entry which we should be able to heal.
fiv, err := entry.fileInfoVersions(bucket)
if err != nil {
wait()
err := bgSeq.queueHealTask(healSource{
bucket: bucket,
object: entry.name,
@@ -732,21 +735,28 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
return
}

custom["versions"] = fmt.Sprint(len(fiv.Versions))
var successVersions, failVersions int
for _, ver := range fiv.Versions {
// Sleep and reset.
wait()
wait = scannerSleeper.Timer(ctx)

stopFn := globalScannerMetrics.timeSize(scannerMetricHealAbandonedVersion)
err := bgSeq.queueHealTask(healSource{
bucket: bucket,
object: fiv.Name,
versionID: ver.VersionID,
}, madmin.HealItemObject)
stopFn(int(ver.Size))
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
}
if err == nil {
successVersions++
} else {
failVersions++
}
foundObjs = foundObjs || err == nil
}
custom["success_versions"] = fmt.Sprint(successVersions)
custom["failed_versions"] = fmt.Sprint(failVersions)
},
// Too many disks failed.
finished: func(errs []error) {
@@ -873,7 +883,7 @@ type getSizeFn func(item scannerItem) (sizeSummary, error)
func (i *scannerItem) transformMetaDir() {
split := strings.Split(i.prefix, SlashSeparator)
if len(split) > 1 {
i.prefix = path.Join(split[:len(split)-1]...)
i.prefix = pathJoin(split[:len(split)-1]...)
} else {
i.prefix = ""
}
@@ -909,18 +919,20 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object
return 0
}

func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (applied bool, size int64) {
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) {
size, err := oi.GetActualSize()
if i.debug {
logger.LogIf(ctx, err)
}
if i.lifeCycle == nil {
return false, size
return action, size
}

versionID := oi.VersionID
vcfg, _ := globalBucketVersioningSys.Get(i.bucket)
rCfg, _ := globalBucketObjectLockSys.Get(i.bucket)
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, oi)
replcfg, _ := getReplicationConfig(ctx, i.bucket)
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, replcfg, oi)
if i.debug {
if versionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action)
@@ -929,7 +941,7 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
}
}
defer func() {
if applied {
if lcEvt.Action != lifecycle.NoneAction {
numVersions := uint64(1)
if lcEvt.Action == lifecycle.DeleteAllVersionsAction {
numVersions = uint64(oi.NumVersions)
@@ -939,14 +951,21 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
}()

switch lcEvt.Action {
case lifecycle.DeleteAction, lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction, lifecycle.DeleteAllVersionsAction:
return applyLifecycleAction(lcEvt, lcEventSrc_Scanner, oi), 0
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
return applyLifecycleAction(lcEvt, lcEventSrc_Scanner, oi), size
default:
// No action.
return false, size
// This version doesn't contribute towards sizeS only when it is permanently deleted.
// This can happen when,
// - ExpireObjectAllVersions flag is enabled
// - NoncurrentVersionExpiration is applicable
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction:
size = 0
case lifecycle.DeleteAction:
// On a non-versioned bucket, DeleteObject removes the only version permanently.
if !vcfg.PrefixEnabled(oi.Name) {
size = 0
}
}

applyLifecycleAction(lcEvt, lcEventSrc_Scanner, oi)
return lcEvt.Action, size
}

// applyTierObjSweep removes remote object pending deletion and the free-version
@@ -1061,17 +1080,6 @@ func (i *scannerItem) applyNewerNoncurrentVersionLimit(ctx context.Context, _ Ob
// applyVersionActions will apply lifecycle checks on all versions of a scanned item. Returns versions that remain
// after applying lifecycle checks configured.
func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fivs []FileInfo) ([]ObjectInfo, error) {
if i.heal.enabled {
if healDeleteDangling {
done := globalScannerMetrics.time(scannerMetricCleanAbandoned)
err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
done()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err))
}
}
}

objInfos, err := i.applyNewerNoncurrentVersionLimit(ctx, o, fivs)
if err != nil {
return nil, err
@@ -1099,29 +1107,46 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) int64 {
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) (objDeleted bool, size int64) {
done := globalScannerMetrics.time(scannerMetricILM)
applied, size := i.applyLifecycle(ctx, o, oi)
var action lifecycle.Action
action, size = i.applyLifecycle(ctx, o, oi)
done()

// Note: objDeleted is true if and only if action ==
// lifecycle.DeleteAllVersionsAction
if action == lifecycle.DeleteAllVersionsAction {
return true, 0
}

// For instance, an applied lifecycle means we remove/transitioned an object
// from the current deployment, which means we don't have to call healing
// routine even if we are asked to do via heal flag.
if !applied {
if action == lifecycle.NoneAction {
if i.heal.enabled {
done := globalScannerMetrics.time(scannerMetricHealCheck)
size = i.applyHealing(ctx, o, oi)
done()

if healDeleteDangling {
done := globalScannerMetrics.time(scannerMetricCleanAbandoned)
err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
done()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err))
}
}
}

// replicate only if lifecycle rules are not applied.
done := globalScannerMetrics.time(scannerMetricCheckReplication)
i.healReplication(ctx, o, oi.Clone(), sizeS)
done()
}
return size
return false, size
}

func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr lock.Retention, obj ObjectInfo) lifecycle.Event {
func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr lock.Retention, rcfg *replication.Config, obj ObjectInfo) lifecycle.Event {
event := lc.Eval(obj.ToLifecycleOpts())
if serverDebugLog {
console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", event.Action)
@@ -1153,6 +1178,9 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
}
return lifecycle.Event{Action: lifecycle.NoneAction}
}
if rcfg != nil && !obj.VersionPurgeStatus.Empty() && rcfg.HasActiveRules(obj.Name, true) {
return lifecycle.Event{Action: lifecycle.NoneAction}
}
}

return event
@@ -1247,7 +1275,7 @@ func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)

// objectPath returns the prefix and object name.
func (i *scannerItem) objectPath() string {
return path.Join(i.prefix, i.objectName)
return pathJoin(i.prefix, i.objectName)
}

// healReplication will heal a scanned item that has failed replication.

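With the signature change above, applyActions now reports both whether the object was removed outright and the size to account for, and the accompanying note says objDeleted is true only for lifecycle.DeleteAllVersionsAction. A hypothetical call-site sketch (this is not the actual caller in the diff; item, oi, sizeS and accountedSize are illustrative):

    // Hedged sketch of consuming the new (objDeleted, size) return values.
    objDeleted, sz := item.applyActions(ctx, objAPI, oi, &sizeS)
    if objDeleted {
        return // all versions were expired; nothing to add to usage totals
    }
    accountedSize += sz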
@@ -38,6 +38,7 @@ import (
|
||||
"github.com/minio/minio/internal/hash"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"github.com/valyala/bytebufferpool"
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE -unexported
|
||||
@@ -77,8 +78,8 @@ func newAllTierStats() *allTierStats {
|
||||
}
|
||||
}
|
||||
|
||||
func (ats *allTierStats) addSizes(sz sizeSummary) {
|
||||
for tier, st := range sz.tiers {
|
||||
func (ats *allTierStats) addSizes(tiers map[string]tierStats) {
|
||||
for tier, st := range tiers {
|
||||
ats.Tiers[tier] = ats.Tiers[tier].add(st)
|
||||
}
|
||||
}
|
||||
@@ -94,18 +95,16 @@ func (ats *allTierStats) clone() *allTierStats {
|
||||
return nil
|
||||
}
|
||||
dst := *ats
|
||||
if dst.Tiers != nil {
|
||||
dst.Tiers = make(map[string]tierStats, len(dst.Tiers))
|
||||
for tier, st := range dst.Tiers {
|
||||
dst.Tiers[tier] = st
|
||||
}
|
||||
dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
|
||||
for tier, st := range ats.Tiers {
|
||||
dst.Tiers[tier] = st
|
||||
}
|
||||
return &dst
|
||||
}
|
||||
|
||||
func (ats *allTierStats) adminStats(stats map[string]madmin.TierStats) map[string]madmin.TierStats {
|
||||
func (ats *allTierStats) populateStats(stats map[string]madmin.TierStats) {
|
||||
if ats == nil {
|
||||
return stats
|
||||
return
|
||||
}
|
||||
|
||||
// Update stats for tiers as they become available.
|
||||
@@ -116,7 +115,7 @@ func (ats *allTierStats) adminStats(stats map[string]madmin.TierStats) map[strin
|
||||
NumObjects: st.NumObjects,
|
||||
}
|
||||
}
|
||||
return stats
|
||||
return
|
||||
}
|
||||
|
||||
// tierStats holds per-tier stats of a remote tier.
|
||||
@@ -127,10 +126,11 @@ type tierStats struct {
|
||||
}
|
||||
|
||||
func (ts tierStats) add(u tierStats) tierStats {
|
||||
ts.TotalSize += u.TotalSize
|
||||
ts.NumVersions += u.NumVersions
|
||||
ts.NumObjects += u.NumObjects
|
||||
return ts
|
||||
return tierStats{
|
||||
TotalSize: ts.TotalSize + u.TotalSize,
|
||||
NumVersions: ts.NumVersions + u.NumVersions,
|
||||
NumObjects: ts.NumObjects + u.NumObjects,
|
||||
}
|
||||
}
|
||||
|
||||
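The tierStats.add and allTierStats.addSizes changes above accumulate per-tier counters by value into a map keyed by tier name. A self-contained sketch of the same pattern, with simplified stand-in types (the real structs are msgp-encoded and live in the data-usage cache code):

// Sketch only: stand-alone types mirroring the tierStats/allTierStats pattern above.
type tierStatsLite struct {
	TotalSize   uint64
	NumVersions int
	NumObjects  int
}

func (ts tierStatsLite) add(u tierStatsLite) tierStatsLite {
	return tierStatsLite{
		TotalSize:   ts.TotalSize + u.TotalSize,
		NumVersions: ts.NumVersions + u.NumVersions,
		NumObjects:  ts.NumObjects + u.NumObjects,
	}
}

// addSizes folds one scan summary's per-tier counters into the running totals,
// the way allTierStats.addSizes consumes summary.tiers after this change.
func addSizes(totals, summary map[string]tierStatsLite) {
	for tier, st := range summary {
		totals[tier] = totals[tier].add(st)
	}
}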
//msgp:tuple replicationStatsV1
@@ -196,12 +196,11 @@ func (r *replicationAllStats) clone() *replicationAllStats {
dst := *r

// Copy individual targets.
if dst.Targets != nil {
dst.Targets = make(map[string]replicationStats, len(dst.Targets))
for k, v := range r.Targets {
dst.Targets[k] = v
}
dst.Targets = make(map[string]replicationStats, len(r.Targets))
for k, v := range r.Targets {
dst.Targets[k] = v
}

return &dst
}

@@ -346,26 +345,24 @@ func (e *dataUsageEntry) addSizes(summary sizeSummary) {
e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
e.ReplicationStats.ReplicaCount += uint64(summary.replicaCount)

if summary.replTargetStats != nil {
for arn, st := range summary.replTargetStats {
tgtStat, ok := e.ReplicationStats.Targets[arn]
if !ok {
tgtStat = replicationStats{}
}
tgtStat.PendingSize += uint64(st.pendingSize)
tgtStat.FailedSize += uint64(st.failedSize)
tgtStat.ReplicatedSize += uint64(st.replicatedSize)
tgtStat.ReplicatedCount += uint64(st.replicatedCount)
tgtStat.FailedCount += st.failedCount
tgtStat.PendingCount += st.pendingCount
e.ReplicationStats.Targets[arn] = tgtStat
for arn, st := range summary.replTargetStats {
tgtStat, ok := e.ReplicationStats.Targets[arn]
if !ok {
tgtStat = replicationStats{}
}
tgtStat.PendingSize += uint64(st.pendingSize)
tgtStat.FailedSize += uint64(st.failedSize)
tgtStat.ReplicatedSize += uint64(st.replicatedSize)
tgtStat.ReplicatedCount += uint64(st.replicatedCount)
tgtStat.FailedCount += st.failedCount
tgtStat.PendingCount += st.pendingCount
e.ReplicationStats.Targets[arn] = tgtStat
}
if summary.tiers != nil {
if len(summary.tiers) != 0 {
if e.AllTierStats == nil {
e.AllTierStats = newAllTierStats()
}
e.AllTierStats.addSizes(summary)
e.AllTierStats.addSizes(summary.tiers)
}
}

@@ -404,7 +401,7 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) {
e.ObjVersions[i] += v
}

if other.AllTierStats != nil {
if other.AllTierStats != nil && len(other.AllTierStats.Tiers) != 0 {
if e.AllTierStats == nil {
e.AllTierStats = newAllTierStats()
}
@@ -925,39 +922,55 @@ type objectIO interface {
// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// The loader is optimistic and has no locking, but tries 5 times before giving up.
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
// If the object is not found, a nil error with empty data usage cache is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
// Abandon if more than 5 minutes, so we don't hold up scanner.
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()
// By defaut, empty data usage cache
*d = dataUsageCache{}

load := func(name string, timeout time.Duration) (bool, error) {
// Abandon if more than time.Minute, so we don't hold up scanner.
// drive timeout by default is 2 minutes, we do not need to wait longer.
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

// Caches are read+written without locks,
retries := 0
for retries < 5 {
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
case InsufficientReadQuorum, StorageErr:
retries++
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
continue
default:
return toObjectErr(err, dataUsageBucket, name)
return true, nil
}
*d = dataUsageCache{}
return nil
}
if err := d.deserialize(r); err != nil {
r.Close()
retries++
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
continue
return false, err
}
err = d.deserialize(r)
r.Close()
return nil
return err != nil, nil
}
*d = dataUsageCache{}

// Caches are read+written without locks,
retries := 0
for retries < 5 {
retry, err := load(name, time.Minute)
if err != nil {
return toObjectErr(err, dataUsageBucket, name)
}
if !retry {
break
}
retry, err = load(name+".bkp", 30*time.Second)
if err == nil && !retry {
// Only return when we have valid data from the backup
break
}
retries++
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
}

if retries == 5 {
logger.LogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
}

return nil
}

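The rewritten dataUsageCache.load above wraps a single attempt in a closure and retries up to five times, falling back to a "<name>.bkp" object with a shorter timeout when the primary copy is unreadable. A stripped-down sketch of that loop, with a generic fetch callback standing in for GetObjectNInfo plus deserialize (names are illustrative, not MinIO's API):

// Sketch only: "retry" means the data was unreadable but the error is not fatal.
import (
	"math/rand"
	"time"
)

func loadWithBackup(fetch func(name string, timeout time.Duration) (retry bool, err error), name string) error {
	for retries := 0; retries < 5; retries++ {
		retry, err := fetch(name, time.Minute)
		if err != nil {
			return err // backend error, give up immediately
		}
		if !retry {
			return nil // primary copy was readable (or simply absent)
		}
		// Primary was corrupt/unreadable: try the backup with a shorter timeout.
		if retry, err = fetch(name+".bkp", 30*time.Second); err == nil && !retry {
			return nil
		}
		// Jittered sleep before the next round, mirroring the code above.
		time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
	}
	return nil // after 5 rounds the caller proceeds with an empty cache
}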
@@ -967,47 +980,47 @@ var maxConcurrentScannerSaves = make(chan struct{}, 4)
// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
// Note that no locking is done when saving.
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
var r io.Reader
maxConcurrentScannerSaves <- struct{}{}
defer func() {
<-maxConcurrentScannerSaves
}()
// If big, do streaming...
size := int64(-1)
if len(d.Cache) > 10000 {
pr, pw := io.Pipe()
go func() {
pw.CloseWithError(d.serializeTo(pw))
}()
defer pr.Close()
r = pr
} else {
var buf bytes.Buffer
err := d.serializeTo(&buf)
if err != nil {
return err
}
r = &buf
size = int64(buf.Len())
select {
case <-ctx.Done():
return ctx.Err()
case maxConcurrentScannerSaves <- struct{}{}:
}

hr, err := hash.NewReader(r, size, "", "", size)
if err != nil {
buf := bytebufferpool.Get()
defer func() {
<-maxConcurrentScannerSaves
buf.Reset()
bytebufferpool.Put(buf)
}()

if err := d.serializeTo(buf); err != nil {
return err
}

// Abandon if more than 5 minutes, so we don't hold up scanner.
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()
_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(hr),
ObjectOptions{NoLock: true})
if isErrBucketNotFound(err) {
return nil
save := func(name string, timeout time.Duration) error {
hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len()))
if err != nil {
return err
}

// Abandon if more than a minute, so we don't hold up scanner.
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(hr),
ObjectOptions{NoLock: true})
if isErrBucketNotFound(err) {
return nil
}
return err
}
return err
defer save(name+".bkp", 5*time.Second) // Keep a backup as well

// drive timeout by default is 2 minutes, we do not need to wait longer.
return save(name, time.Minute)
}

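The new save path serializes the cache once into a pooled buffer and then writes two objects, the primary and a ".bkp" copy, each under its own timeout; the backup write is deferred and best-effort. A condensed sketch with a generic put callback in place of PutObject (names are illustrative):

// Sketch only: serialize once, then write primary and backup copies.
import (
	"bytes"
	"time"
)

func saveWithBackup(serialize func(*bytes.Buffer) error,
	put func(name string, data []byte, timeout time.Duration) error, name string) error {
	var buf bytes.Buffer
	if err := serialize(&buf); err != nil {
		return err
	}
	// Best-effort backup; written on the way out, its error is intentionally ignored.
	defer put(name+".bkp", buf.Bytes(), 5*time.Second)
	// The primary write decides the return value.
	return put(name, buf.Bytes(), time.Minute)
}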
// dataUsageCacheVer indicates the cache version.

@@ -107,35 +107,22 @@ func (dui DataUsageInfo) tierStats() []madmin.TierInfo {
return nil
}

cfgs := globalTierConfigMgr.ListTiers()
if len(cfgs) == 0 {
if globalTierConfigMgr.Empty() {
return nil
}

ts := make(map[string]madmin.TierStats, len(cfgs)+1)
ts := make(map[string]madmin.TierStats)
dui.TierStats.populateStats(ts)

infos := make([]madmin.TierInfo, 0, len(ts))

// Add STANDARD (hot-tier)
ts[minioHotTier] = madmin.TierStats{}
infos = append(infos, madmin.TierInfo{
Name: minioHotTier,
Type: "internal",
})
// Add configured remote tiers
for _, cfg := range cfgs {
ts[cfg.Name] = madmin.TierStats{}
for tier, stats := range ts {
infos = append(infos, madmin.TierInfo{
Name: cfg.Name,
Type: cfg.Type.String(),
Name: tier,
Type: globalTierConfigMgr.TierType(tier),
Stats: stats,
})
}

ts = dui.TierStats.adminStats(ts)
for i := range infos {
info := infos[i]
infos[i].Stats = ts[info.Name]
}

sort.Slice(infos, func(i, j int) bool {
if infos[i].Type == "internal" {
return true

@@ -21,6 +21,7 @@ import (
"context"
"errors"
"strings"
"time"

jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/logger"
@@ -42,6 +43,7 @@ const (

// storeDataUsageInBackend will store all objects sent on the gui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
attempts := 1
for dataUsageInfo := range dui {
json := jsoniter.ConfigCompatibleWithStandardLibrary
dataUsageJSON, err := json.Marshal(dataUsageInfo)
@@ -49,12 +51,19 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
logger.LogIf(ctx, err)
continue
}
if attempts > 10 {
saveConfig(ctx, objAPI, dataUsageObjNamePath+".bkp", dataUsageJSON) // Save a backup every 10th update.
attempts = 1
}
if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil {
logger.LogIf(ctx, err)
}
attempts++
}
}

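storeDataUsageInBackend above keeps a simple counter so that roughly every 10th marshalled snapshot is also written to a ".bkp" object before the primary is overwritten. The same pattern in isolation (persist stands in for saveConfig; the object name is illustrative, not the real dataUsageObjNamePath):

// Sketch only: periodically refresh a backup copy alongside the primary object.
func storeLoop(updates <-chan []byte, persist func(name string, data []byte) error) {
	const primary = "data-usage.json" // hypothetical name for illustration
	attempts := 1
	for data := range updates {
		if attempts > 10 {
			_ = persist(primary+".bkp", data) // best-effort backup every 10th update
			attempts = 1
		}
		_ = persist(primary, data)
		attempts++
	}
}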
var prefixUsageCache timedValue

// loadPrefixUsageFromBackend returns prefix usages found in passed buckets
//
// e.g.: /testbucket/prefix => 355601334
@@ -67,37 +76,57 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket

cache := dataUsageCache{}

m := make(map[string]uint64)
for _, pool := range z.serverPools {
for _, er := range pool.sets {
// Load bucket usage prefixes
if err := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName); err == nil {
root := cache.find(bucket)
if root == nil {
// We dont have usage information for this bucket in this
// set, go to the next set
continue
}
prefixUsageCache.Once.Do(func() {
prefixUsageCache.TTL = 30 * time.Second

for id, usageInfo := range cache.flattenChildrens(*root) {
prefix := decodeDirObject(strings.TrimPrefix(id, bucket+slashSeparator))
// decodeDirObject to avoid any __XLDIR__ objects
m[prefix] += uint64(usageInfo.Size)
// No need to fail upon Update() error, fallback to old value.
prefixUsageCache.Relax = true
prefixUsageCache.Update = func() (interface{}, error) {
m := make(map[string]uint64)
for _, pool := range z.serverPools {
for _, er := range pool.sets {
// Load bucket usage prefixes
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
ok := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName) == nil
done()
if ok {
root := cache.find(bucket)
if root == nil {
// We dont have usage information for this bucket in this
// set, go to the next set
continue
}

for id, usageInfo := range cache.flattenChildrens(*root) {
prefix := decodeDirObject(strings.TrimPrefix(id, bucket+slashSeparator))
// decodeDirObject to avoid any __XLDIR__ objects
m[prefix] += uint64(usageInfo.Size)
}
}
}
}
return m, nil
}
})

v, _ := prefixUsageCache.Get()
if v != nil {
return v.(map[string]uint64), nil
}

return m, nil
return map[string]uint64{}, nil
}

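loadPrefixUsageFromBackend is now served through the internal timedValue helper: the Update function is installed once, results are cached for a 30-second TTL, and Relax means a failed refresh falls back to the previous value instead of returning an error. A minimal stand-in for that helper, assuming the semantics described by the comments above rather than quoting its actual implementation:

// Sketch only: a lazily initialized, TTL-cached value with "keep last good value" refresh.
import (
	"sync"
	"time"
)

type cachedValue struct {
	once    sync.Once
	mu      sync.Mutex
	ttl     time.Duration
	update  func() (interface{}, error)
	last    interface{}
	lastSet time.Time
}

func (c *cachedValue) get(init func(*cachedValue)) interface{} {
	c.once.Do(func() { init(c) }) // install TTL and Update exactly once
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.lastSet) < c.ttl && c.last != nil {
		return c.last // fresh enough, serve from memory
	}
	if v, err := c.update(); err == nil {
		c.last, c.lastSet = v, time.Now()
	}
	return c.last // on refresh error keep serving the previous value ("Relax")
}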
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
buf, err := readConfig(ctx, objAPI, dataUsageObjNamePath)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return DataUsageInfo{}, nil
buf, err = readConfig(ctx, objAPI, dataUsageObjNamePath+".bkp")
if err != nil {
if errors.Is(err, errConfigNotFound) {
return DataUsageInfo{}, nil
}
return DataUsageInfo{}, toObjectErr(err, minioMetaBucket, dataUsageObjNamePath)
}
return DataUsageInfo{}, toObjectErr(err, minioMetaBucket, dataUsageObjNamePath)
}

var dataUsageInfo DataUsageInfo

File diff suppressed because it is too large
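loadDataUsageFromBackend above now falls back to the ".bkp" object when the primary data-usage config is missing. A small sketch of that fallback order, with readFn standing in for readConfig and a local stand-in for errConfigNotFound:

// Sketch only: read the primary config object, then the backup, before giving up.
import "errors"

var errNotFound = errors.New("config not found") // stand-in for errConfigNotFound

func readWithBackup(readFn func(name string) ([]byte, error), name string) ([]byte, error) {
	buf, err := readFn(name)
	if err == nil {
		return buf, nil
	}
	if !errors.Is(err, errNotFound) {
		return nil, err // real backend error, surface it
	}
	// Primary missing: the backup may still hold the last good snapshot.
	buf, err = readFn(name + ".bkp")
	if errors.Is(err, errNotFound) {
		return nil, nil // neither exists yet; caller starts empty
	}
	return buf, err
}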
@@ -1,60 +0,0 @@
//go:build windows
// +build windows

/*
* MinIO Object Storage (c) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"errors"
"os"

"github.com/djherbis/atime"
"golang.org/x/sys/windows/registry"
)

// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
file, err := os.CreateTemp(dir, "prefix")
if err != nil {
return
}
defer os.Remove(file.Name())
defer file.Close()
finfo1, err := os.Stat(file.Name())
if err != nil {
return
}
atime.Get(finfo1)

k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control\FileSystem`, registry.QUERY_VALUE)
if err != nil {
return
}
defer k.Close()

setting, _, err := k.GetIntegerValue("NtfsDisableLastAccessUpdate")
if err != nil {
return
}

lowSetting := setting & 0xFFFF
if lowSetting != uint64(0x0000) && lowSetting != uint64(0x0002) {
return errors.New("Atime not supported")
}
return
}
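For reference, the deleted Windows helper accepts only the masked registry values 0x0 and 0x2 as "atime supported"; on current Windows releases the low word of NtfsDisableLastAccessUpdate encodes user/system managed crossed with enabled/disabled, and 0 and 2 are the enabled states (stated here as background, not taken from this diff). The check in isolation:

// Sketch only: interpreting the masked registry value checked above.
func atimeEnabled(lowSetting uint64) bool {
	return lowSetting == 0x0000 || lowSetting == 0x0002
}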
Some files were not shown because too many files have changed in this diff.