Compare commits

276 commits in the range `RELEASE.20...` → `RELEASE.20...`.
Commits in this comparison (SHA1):

```
a3ba8188d7 2760fc86af abb14aeec1 8ceb2a93fd c2f16ee846 071c004f8b 6484453fc6 a0d0645128 1738eb24b1 253194e491
736e58dd68 907a171edd ed6d2a100f effe131090 01498a3e34 18063bf25c 57f0176759 dbbed6f7f0 7fbfdceba3 9dda9fb903
f1418a50f0 017954e7ea 806625cbff 045e30f2c1 c6a9a94f94 8e7c00f3d4 d1ed1da8c6 23e8390997 71403be912 f28d02b7f2
e0cb814f3f 98a08e1644 3047121255 5a7f92481e 0d45c38782 56d1b227cf 061fa0635c 6e138f955e bea87a5a20 2b4eb87d77
799758e54f 1f9abbee4d fdf0ae9167 00eb6f6bc9 66174692a2 849fcf0127 209680e89f e0c04a2da0 27d9bd04e5 511424a287
bebcf4f004 eafa775952 66b4a862e0 9603489dd3 b302c8a5f4 4de88e87bb b880796aef 37a5d5d7a0 3cac262dd1 e6ab4db6b8
ca989eb0b3 d778d034e7 df08fd1f03 ac82f416a4 f7f9517b6a 90cff10e2b 81caf35926 5726cef3ca 5fdf47b118 8b74a72b21
eec69d6796 0537a21b79 4c54ed8748 a4006e23a0 b17dc81540 d73c4f09f3 4c81201f95 cd8d511d3d 899a2fa1c7 17e17da00d
a5da9120f3 aa12d75d75 dd4a2d7419 6fcbdd5607 3831cc9e3b 230fc0d186 7f9498f43f 1cf322b7d4 3168e93730 0b1c824618
c851e022b7 6f45e303f5 5ad032826a 84bf4624a4 dff37aa33d d12831eb07 4a36cd7035 00555c747e 03490c811b 48d2c03250
ed78854cea e60834838f d616d8a857 24cab7f9df b2536476c9 02c1a08a5b 5c47ce456e 8ea55f9dba 80e3dce631 80fab03b63
730d2dc7be 0ee9678190 34859c6d4b b1c99e88ac 0104af6bcc 224daee391 34ea1d2167 9d95937018 74a7889a3e fa01e640f5
f355374962 bda0fe3150 b70995dd60 4b6264da7d 48919de301 eb3ded420e eb2934f0c1 b7438fe4e6 ce6cef6855 a966ccd17d
493c714663 e959c5d71c 4a2928eb49 af88772a78 1dce6918c2 9109148474 eaaf05a7cc 52e21bc853 16e1a25bc0 958661cbb5
6019628f7d 0987069e37 6a0372be6c c13afd56e8 96997d2b21 86a3319d41 a694ba93d9 9f60e84ce1 a9aaea0d67 572b1721b2
b0e1d4ce78 eb19c8af40 746f1585eb 2d58a8d861 0037951b6e fbd1c5f51a 1c6781757c b4e3956e69 c51229493b 631d55aa22
8a291e1dc0 650dccfa9e d08b4b147d 9a703befe6 9a1615768d 37da0c647e 2acb530ccd 3e1fb17b70 a89d6b8e3d 1c085f7d1a
4b6585d249 9ffad7fceb 18725679c4 ba8a8ad818 102ad60dee cb61e50b51 859ef52886 f04a1f220c cd380251b3 92cd1eed45
db32a24cb6 2d96940826 e730da1438 46ee8659b4 73a6b4ea11 c1b88c17cc a359e36e35 0a2e6d58a5 7e80afdd7f 1b119557c2
7778fef6bb ea1803417f ea5094e842 5a974fb10c 9acdeab73d d19b434ffc 17a1eda702 7d50a0cfea ceff7bcca5 7d1734d033
03ec6adfd0 309b10f201 2a8e40f19f 5f7bd2b1da c097ce9c32 caad314faa bc2ebe0021 21e8440423 11aa393ba7 d0c910a6f3
81c90ae430 d15a5ad4cc 0ff246653b 113bcbdb78 95411228db 23774353b7 052b5262ff a2a5ec93d3 331c517a5b 27a774cbe9
8e6787a302 59352d0ac2 75d44b3bae 95ae6c4b49 98ca770f81 ccd967e3be 0ebb73ee2e c8b84a0e9e 3acb5cff45 ab801ad3d4
74116204ce 2eb5f934d8 b43d376a87 e4a44f6224 0272973175 adca28801d d2a3f92452 ede86845e5 e57c742674 bb5976d727
670724184c f7c1a59de1 01a2ccc52f 51ba1dac49 a4463dd40f 83a82d818e 1d1c4430b2 4e00b47b52 43e6d1ce2d 30da442a85
038d91feaa e7ba78beee ab43804efd 1c865dd119 b32d0a5b60 79e21601b0 34253aa595 79ed7ce451 900eebb9a4 6914b2c99d
0dd3a08169 f8f290e848 9179cdfc9d 76b6dc0112 ce303f5c7e b4b7a18497 1e2ebc9945 a49e3647b6 954e17c3d0 2a9819aff8
8049184dcc 6c6137b2e7 19c4f3082b 433c2831ae 9138b2b503 6d64aab420
```
`.github/stale.yml` (vendored, 1 changed line)

```diff
@@ -14,6 +14,7 @@ onlyLabels: []
 exemptLabels:
   - "security"
   - "pending discussion"
+  - "do not close"

 # Set to true to ignore issues in a project (defaults to false)
 exemptProjects: false
```
`.github/workflows/codeql.yml` (vendored, 51 deletions): the CodeQL workflow is removed entirely.

```diff
@@ -1,51 +0,0 @@
-name: "Code scanning - action"
-
-on:
-  push:
-  pull_request:
-  schedule:
-    - cron: '0 19 * * 0'
-
-jobs:
-  CodeQL-Build:
-
-    # CodeQL runs on ubuntu-latest and windows-latest
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v2
-        with:
-          # We must fetch at least the immediate parents so that if this is
-          # a pull request then we can checkout the head.
-          fetch-depth: 2
-
-      # If this run was triggered by a pull request event, then checkout
-      # the head of the pull request instead of the merge commit.
-      - run: git checkout HEAD^2
-        if: ${{ github.event_name == 'pull_request' }}
-
-      # Initializes the CodeQL tools for scanning.
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
-        with:
-          languages: go, javascript
-
-      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-      # If this step fails, then you should remove it and run the build manually (see below)
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
-
-      # ℹ️ Command-line programs to run using the OS shell.
-      # 📚 https://git.io/JvXDl
-
-      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-      #    and modify them (or add more) to build your code if your project
-      #    uses a compiled language
-
-      #- run: |
-      #   make bootstrap
-      #   make release
-
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
```
`.github/workflows/go.yml` (vendored, 8 changed lines)

```diff
@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - release

 jobs:
   build:
@@ -12,7 +11,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.13.x]
+        go-version: [1.14.x, 1.15.x]
         os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2
@@ -38,7 +37,12 @@ jobs:
           GO111MODULE: on
           MINIO_CI_CD: 1
         run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           sudo apt-get install devscripts shellcheck
+          nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
+          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
+          go list -m all | ./nancy sleuth
           make
           diff -au <(gofmt -s -d cmd) <(printf "")
          diff -au <(gofmt -s -d pkg) <(printf "")
```
`.nancy-ignore` (new, empty file)
`Dockerfile`: the builder image moves from Go 1.13 to Go 1.14.

```diff
@@ -1,4 +1,4 @@
-FROM golang:1.13-alpine as builder
+FROM golang:1.14-alpine as builder

 LABEL maintainer="MinIO Inc <dev@min.io>"
```
`Makefile` (13 changed lines)

```diff
@@ -17,11 +17,18 @@ checks:
 getdeps:
 	@mkdir -p ${GOPATH}/bin
 	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
+	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
+	@which msgp 1>/dev/null || (echo "Installing msgp" && GO111MODULE=off go get github.com/tinylib/msgp)
+	@which stringer 1>/dev/null || (echo "Installing stringer" && GO111MODULE=off go get golang.org/x/tools/cmd/stringer)

 crosscompile:
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)

-verifiers: getdeps fmt lint
+verifiers: getdeps fmt lint ruleguard check-gen
+
+check-gen:
+	@go generate ./... >/dev/null
+	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

 fmt:
 	@echo "Running $@ check"
@@ -33,6 +40,10 @@ lint:
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

+ruleguard:
+	@echo "Running $@ check"
+	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
+
 # Builds minio, runs the verifiers then runs the tests.
 check: test
 test: verifiers build
```
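The new `check-gen` target regenerates code with `go generate ./...` and fails if any committed `_gen.go` file differs from what the generators produce. A minimal sketch of the kind of directive it guards (hypothetical file and struct, assuming the `msgp` tool named in `getdeps`):

```go
// file: cmd/example.go (hypothetical)
// Running `go generate ./...` on a file like this must leave the
// committed example_gen.go byte-identical, otherwise `make check-gen`
// fails the build.
package cmd

//go:generate msgp -file $GOFILE

// ExampleRecord is a hypothetical struct whose MessagePack
// (de)serialization code the msgp tool would write to example_gen.go.
type ExampleRecord struct {
	Name string `msg:"name"`
	Size int64  `msg:"size"`
}
```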
`README.md`

````diff
@@ -88,7 +88,7 @@ service minio start
 ```

 ## Install from Source
-Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
+Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.14](https://golang.org/dl/#stable)

 ```sh
 GO111MODULE=on go get github.com/minio/minio
@@ -177,7 +177,7 @@ mc admin update <minio alias, e.g., myminio>
 - `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
 - `mc admin update` is disabled in kubernetes/container environments, container environments provide their own mechanisms to rollout of updates.
 - In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
-- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)x
+- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
 - If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
 - If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
@@ -193,4 +193,4 @@ mc admin update <minio alias, e.g., myminio>
 Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)

 ## License
-Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](./LICENSE).
+Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).
````
`README_zh_CN.md`

````diff
@@ -89,7 +89,7 @@ service minio start

 ## 使用源码安装

-采用源码安装仅供开发人员和高级用户使用,如果你还没有Golang环境, 请参考 [How to install Golang](https://golang.org/doc/install)。最低需要Golang版本为 [go1.13](https://golang.org/dl/#stable)
+采用源码安装仅供开发人员和高级用户使用,如果你还没有Golang环境, 请参考 [How to install Golang](https://golang.org/doc/install)。最低需要Golang版本为 [go1.14](https://golang.org/dl/#stable)

 ```sh
 GO111MODULE=on go get github.com/minio/minio
````

(The changed Chinese line says that source installation is intended only for developers and advanced users, and raises the minimum required Go version from go1.13 to go1.14, mirroring the English README.)
`browser/app/js/browser/BrowserDropdown.js`: the fullscreen option and its handler are removed, the Change Password entry moves to the top of the menu, and the Slack link becomes a support link.

```diff
@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
     const { fetchServerInfo } = this.props
     fetchServerInfo()
   }
-  fullScreen(e) {
-    e.preventDefault()
-    let el = document.documentElement
-    if (el.requestFullscreen) {
-      el.requestFullscreen()
-    }
-    if (el.mozRequestFullScreen) {
-      el.mozRequestFullScreen()
-    }
-    if (el.webkitRequestFullscreen) {
-      el.webkitRequestFullscreen()
-    }
-    if (el.msRequestFullscreen) {
-      el.msRequestFullscreen()
-    }
-  }
   logout(e) {
     e.preventDefault()
     web.Logout()
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
           <i className="fas fa-bars" />
         </Dropdown.Toggle>
         <Dropdown.Menu className="dropdown-menu-right">
+          <li>
+            <a href="" onClick={this.showChangePassword.bind(this)}>
+              Change Password <i className="fas fa-cog" />
+            </a>
+            {this.state.showChangePasswordModal && (
+              <ChangePasswordModal
+                serverInfo={serverInfo}
+                hideChangePassword={this.hideChangePassword.bind(this)}
+              />
+            )}
+          </li>
+          <li>
+            <a target="_blank" href="https://docs.min.io/?ref=ob">
+              Documentation <i className="fas fa-book" />
+            </a>
+          </li>
           <li>
             <a target="_blank" href="https://github.com/minio/minio">
               GitHub <i className="fab fa-github" />
             </a>
           </li>
-          <li>
-            <a href="" onClick={this.fullScreen}>
-              Fullscreen <i className="fas fa-expand" />
-            </a>
-          </li>
-          <li>
-            <a target="_blank" href="https://docs.min.io/">
-              Documentation <i className="fas fa-book" />
-            </a>
-          </li>
           <li>
-            <a target="_blank" href="https://slack.min.io">
-              Ask for help <i className="fas fa-question-circle" />
+            <a target="_blank" href="https://min.io/pricing?ref=ob">
+              Get Support <i className="fas fa-question-circle" />
             </a>
           </li>
           <li>
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
             />
           )}
           </li>
-          <li>
-            <a href="" onClick={this.showChangePassword.bind(this)}>
-              Change Password <i className="fas fa-cog" />
-            </a>
-            {this.state.showChangePasswordModal && (
-              <ChangePasswordModal
-                serverInfo={serverInfo}
-                hideChangePassword={this.hideChangePassword.bind(this)}
-              />
-            )}
-          </li>
           <li>
             <a href="" id="logout" onClick={this.logout}>
-              Sign Out <i className="fas fa-sign-out-alt" />
+              Logout <i className="fas fa-sign-out-alt" />
             </a>
           </li>
         </Dropdown.Menu>
```
`browser/app/js/browser/Header.js`

```diff
@@ -15,6 +15,7 @@
  */

 import React from "react"
+import ObjectsSearch from "../objects/ObjectsSearch"
 import Path from "../objects/Path"
 import StorageInfo from "./StorageInfo"
 import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
     <header className="fe-header">
       <Path />
       {loggedIn && <StorageInfo />}
+      {loggedIn && <ObjectsSearch />}
       <ul className="feh-actions">
         {loggedIn ? (
           <BrowserDropdown />
```
`browser/app/js/buckets/selectors.js`: bucket filtering becomes case-insensitive.

```diff
@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
 export const getFilteredBuckets = createSelector(
   bucketsSelector,
   bucketsFilterSelector,
-  (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
+  (buckets, filter) => buckets.filter(
+    bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
 )

 export const getCurrentBucket = state => state.buckets.currentBucket
```
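The selector now lowercases both the bucket name and the filter before matching. The same case-insensitive substring check, sketched in Go for comparison (an illustrative helper, not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// matchesFilter reports whether name contains filter, ignoring case,
// mirroring the case-insensitive bucket filter introduced above.
func matchesFilter(name, filter string) bool {
	return strings.Contains(strings.ToLower(name), strings.ToLower(filter))
}

func main() {
	fmt.Println(matchesFilter("MyBucket", "bucket")) // true
	fmt.Println(matchesFilter("logs", "BUCKET"))     // false
}
```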
`browser/app/js/objects/ObjectsListContainer.js`

```diff
@@ -18,6 +18,7 @@ import React from "react"
 import { connect } from "react-redux"
 import InfiniteScroll from "react-infinite-scroller"
 import ObjectsList from "./ObjectsList"
+import { getFilteredObjects } from "./selectors"

 export class ObjectsListContainer extends React.Component {
   constructor(props) {
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
       })
     }
   }
+  componentDidUpdate(prevProps) {
+    if (this.props.filter !== prevProps.filter) {
+      this.setState({
+        page: 1
+      })
+    }
+  }
   loadNextPage() {
     this.setState(state => {
       return { page: state.page + 1 }
     })
   }
   render() {
-    const { objects, listLoading } = this.props
+    const { filteredObjects, listLoading } = this.props

-    const visibleObjects = objects.slice(0, this.state.page * 100)
+    const visibleObjects = filteredObjects.slice(0, this.state.page * 100)

     return (
       <div style={{ position: "relative" }}>
         <InfiniteScroll
           pageStart={0}
           loadMore={this.loadNextPage}
-          hasMore={objects.length > visibleObjects.length}
+          hasMore={filteredObjects.length > visibleObjects.length}
           useWindow={true}
           initialLoad={false}
         >
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
   return {
     currentBucket: state.buckets.currentBucket,
     currentPrefix: state.objects.currentPrefix,
-    objects: state.objects.list,
+    filteredObjects: getFilteredObjects(state),
+    filter: state.objects.filter,
     sortBy: state.objects.sortBy,
     sortOrder: state.objects.sortOrder,
     listLoading: state.objects.listLoading
```
`browser/app/js/objects/ObjectsSearch.js` (new file, 43 lines)

```diff
@@ -0,0 +1,43 @@
+/*
+ * MinIO Cloud Storage (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { connect } from "react-redux"
+import * as actionsObjects from "./actions"
+
+export const ObjectsSearch = ({ onChange }) => (
+  <div
+    className="input-group ig-left ig-search-dark"
+    style={{ display: "block" }}
+  >
+    <input
+      className="ig-text"
+      type="input"
+      placeholder="Search Objects..."
+      onChange={e => onChange(e.target.value)}
+    />
+    <i className="ig-helpers" />
+  </div>
+)
+
+const mapDispatchToProps = dispatch => {
+  return {
+    onChange: filter =>
+      dispatch(actionsObjects.setFilter(filter))
+  }
+}
+
+export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
```
`browser/app/js/objects/__tests__/ObjectsListContainer.test.js`

```diff
@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"

 describe("ObjectsList", () => {
   it("should render without crashing", () => {
-    shallow(<ObjectsListContainer objects={[]} />)
+    shallow(<ObjectsListContainer filteredObjects={[]} />)
   })

   it("should render ObjectsList with objects", () => {
     const wrapper = shallow(
       <ObjectsListContainer
-        objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
+        filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
       />
     )
     expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
     const wrapper = shallow(
       <ObjectsListContainer
         currentBucket="test1"
-        objects={[]}
+        filteredObjects={[]}
         listLoading={true}
       />
     )
```
`browser/app/js/objects/__tests__/ObjectsSearch.test.js` (new file, 32 lines)

```diff
@@ -0,0 +1,32 @@
+/*
+ * MinIO Cloud Storage (C) 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { shallow } from "enzyme"
+import { ObjectsSearch } from "../ObjectsSearch"
+
+describe("ObjectsSearch", () => {
+  it("should render without crashing", () => {
+    shallow(<ObjectsSearch />)
+  })
+
+  it("should call onChange with search text", () => {
+    const onChange = jest.fn()
+    const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
+    wrapper.find("input").simulate("change", { target: { value: "test" } })
+    expect(onChange).toHaveBeenCalledWith("test")
+  })
+})
```
`browser/app/js/objects/__tests__/reducer.test.js`

```diff
@@ -23,6 +23,7 @@ describe("objects reducer", () => {
     const initialState = reducer(undefined, {})
     expect(initialState).toEqual({
       list: [],
+      filter: "",
       listLoading: false,
       sortBy: "",
       sortOrder: SORT_ORDER_ASC,
```
`browser/app/js/objects/actions.js`

```diff
@@ -36,6 +36,7 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

 export const SET_LIST = "objects/SET_LIST"
 export const RESET_LIST = "objects/RESET_LIST"
+export const SET_FILTER = "objects/SET_FILTER"
 export const APPEND_LIST = "objects/APPEND_LIST"
 export const REMOVE = "objects/REMOVE"
 export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -57,6 +58,13 @@ export const resetList = () => ({
   type: RESET_LIST,
 })

+export const setFilter = filter => {
+  return {
+    type: SET_FILTER,
+    filter
+  }
+}
+
 export const setListLoading = (listLoading) => ({
   type: SET_LIST_LOADING,
   listLoading,
```
`browser/app/js/objects/reducer.js`

```diff
@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
 export default (
   state = {
     list: [],
+    filter: "",
     listLoading: false,
     sortBy: "",
     sortOrder: SORT_ORDER_ASC,
@@ -53,6 +54,11 @@ export default (
       ...state,
       list: []
     }
+    case actionsObjects.SET_FILTER:
+      return {
+        ...state,
+        filter: action.filter
+      }
     case actionsObjects.SET_LIST_LOADING:
       return {
         ...state,
```
`browser/app/js/objects/selectors.js`

```diff
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
 export const getCheckedList = state => state.objects.checkedList

 export const getPrefixWritable = state => state.objects.prefixWritable
+
+const objectsSelector = state => state.objects.list
+const objectsFilterSelector = state => state.objects.filter
+
+export const getFilteredObjects = createSelector(
+  objectsSelector,
+  objectsFilterSelector,
+  (objects, filter) => objects.filter(
+    object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
+)
```
`browser/app/js/uploads/Dropzone.js`

```diff
@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
     // Overwrite the default styling from react-dropzone; otherwise it
     // won't handle child elements correctly.
     const style = {
       height: "100%",
       flex: "1",
       borderWidth: "0",
       borderStyle: "dashed",
       borderColor: "#fff"
```
Browser stylesheets (`browser/app/less`)

```diff
@@ -20,7 +20,8 @@
 @media(max-width: @screen-sm-max) {
   padding: 75px 0 80px;
 }

 display: flex;
 flex-direction: column;
 min-height: 100vh;
 overflow: auto;
 }
@@ -169,6 +169,24 @@ select.form-control {
   }
 }

+.ig-search-dark {
+  &:before {
+    font-family: @font-family-icon;
+    font-weight: 900;
+    content: '\f002';
+    font-size: 15px;
+    position: absolute;
+    left: 2px;
+    top: 8px;
+    color: rgba(0, 0, 0, 0.5);
+  }
+
+  .ig-text {
+    padding-left: 25px;
+    .placeholder(rgba(0, 0, 0, 0.5))
+  }
+}
+
 .ig-search {
   &:before {
     font-family: @font-family-icon;
@@ -270,4 +288,4 @@ select.form-control {
 .set-expire-decrease {
   bottom: -27px;
   .rotate(-180deg);
 }
 }
```
File diff suppressed because one or more lines are too long
`buildscripts/verify-build.sh`: the start helpers no longer capture and echo server PIDs; the run helpers now stop servers with `pkill minio` instead of killing individually tracked PIDs.

```diff
@@ -45,88 +45,63 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
 function start_minio_fs()
 {
     "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
-    minio_pid=$!
     sleep 10
-
-    echo "$minio_pid"
 }

 function start_minio_erasure()
 {
     "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
-    minio_pid=$!
     sleep 15
-
-    echo "$minio_pid"
 }

 function start_minio_erasure_sets()
 {
     "${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
-    minio_pid=$!
     sleep 15
-
-    echo "$minio_pid"
 }

 function start_minio_zone_erasure_sets()
 {
-    declare -a minio_pids
     export MINIO_ACCESS_KEY=$ACCESS_KEY
     export MINIO_SECRET_KEY=$SECRET_KEY

     "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
-    minio_pids[0]=$!
-
     "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
-    minio_pids[1]=$!

     sleep 40
-    echo "${minio_pids[@]}"
 }

 function start_minio_zone_erasure_sets_ipv6()
 {
-    declare -a minio_pids
     export MINIO_ACCESS_KEY=$ACCESS_KEY
     export MINIO_SECRET_KEY=$SECRET_KEY

-    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
-    minio_pids[0]=$!
-
-    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
-    minio_pids[1]=$!
+    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-ipv6-9000.log" 2>&1 &
+    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-ipv6-9001.log" 2>&1 &

     sleep 40
-    echo "${minio_pids[@]}"
 }

 function start_minio_dist_erasure()
 {
-    declare -a minio_pids
     export MINIO_ACCESS_KEY=$ACCESS_KEY
     export MINIO_SECRET_KEY=$SECRET_KEY
     "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
-    minio_pids[0]=$!
     "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
-    minio_pids[1]=$!
     "${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
-    minio_pids[2]=$!
     "${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
-    minio_pids[3]=$!

     sleep 40
-    echo "${minio_pids[@]}"
 }

 function run_test_fs()
 {
-    minio_pid="$(start_minio_fs)"
+    start_minio_fs

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    kill "$minio_pid"
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -138,12 +113,12 @@ function run_test_fs()
 }

 function run_test_erasure_sets() {
-    minio_pid="$(start_minio_erasure_sets)"
+    start_minio_erasure_sets

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    kill "$minio_pid"
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -156,14 +131,12 @@ function run_test_erasure_sets() {

 function run_test_zone_erasure_sets()
 {
-    minio_pids=( $(start_minio_zone_erasure_sets) )
+    start_minio_zone_erasure_sets

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -182,16 +155,14 @@ function run_test_zone_erasure_sets()

 function run_test_zone_erasure_sets_ipv6()
 {
-    minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
+    start_minio_zone_erasure_sets_ipv6

     export SERVER_ENDPOINT="[::1]:9000"

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -210,12 +181,12 @@ function run_test_zone_erasure_sets_ipv6()

 function run_test_erasure()
 {
-    minio_pid="$(start_minio_erasure)"
+    start_minio_erasure

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    kill "$minio_pid"
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -228,14 +199,12 @@ function run_test_erasure()

 function run_test_dist_erasure()
 {
-    minio_pids=( $(start_minio_dist_erasure) )
+    start_minio_dist_erasure

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
```
Remote bucket targets: `ListRemoteTargetsHandler` now verifies that the bucket exists before reading its targets configuration.

```diff
@@ -226,6 +226,11 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
 		return
 	}
 	if bucket != "" {
+		// Check if bucket exists.
+		if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+			return
+		}
 		if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
 			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 			return
```
`cmd/admin-handlers-config-kv.go` and `cmd/admin-handlers-users.go`: several admin handlers switch from `newObjectLayerWithoutSafeModeFn()` to `newObjectLayerFn()`, and `InfoCannedPolicy` now checks the JSON-encode error before flushing.

```diff
@@ -42,7 +42,7 @@ import (
 func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return auth.Credentials{}, nil
@@ -35,7 +35,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
 	var adminAPIErr APIErrorCode

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return nil, cred
@@ -386,7 +386,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 	defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -465,7 +465,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
 	defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -520,7 +520,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
 	defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -579,7 +579,7 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
 	defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -722,7 +722,10 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
 		return
 	}

-	json.NewEncoder(w).Encode(policy)
+	if err = json.NewEncoder(w).Encode(policy); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
 	w.(http.Flusher).Flush()
 }
```
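The `InfoCannedPolicy` change illustrates a general pattern: `json.Encoder.Encode` writes straight into the response, so its error should be checked before flushing. A minimal standalone sketch of that pattern (hypothetical handler and type, standard library only):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// policyInfo is a hypothetical stand-in for the policy document the
// real handler returns.
type policyInfo struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

func infoPolicy(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	// Encode streams directly into the ResponseWriter; if it fails
	// (for example, the client went away), report the error instead
	// of flushing a half-written body. Note the error reply is
	// best-effort once bytes may already have been written.
	if err := json.NewEncoder(w).Encode(policyInfo{Name: "readonly", Version: "v1"}); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Flush pushes any buffered bytes to the client when supported.
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}

func main() {
	http.HandleFunc("/policy", infoPolicy)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```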
`cmd/admin-handlers.go`

```diff
@@ -35,9 +35,7 @@ import (
 	"time"

 	"github.com/gorilla/mux"

-	"github.com/minio/minio/cmd/config"
-	"github.com/minio/minio/cmd/config/notify"
 	"github.com/minio/minio/cmd/crypto"
 	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
@@ -54,16 +52,13 @@ const (
 	maxEConfigJSONSize = 262272
 )

-// Type-safe query params.
-type mgmtQueryKey string
-
 // Only valid query params for mgmt admin APIs.
 const (
-	mgmtBucket      mgmtQueryKey = "bucket"
-	mgmtPrefix                   = "prefix"
-	mgmtClientToken              = "clientToken"
-	mgmtForceStart               = "forceStart"
-	mgmtForceStop                = "forceStop"
+	mgmtBucket      = "bucket"
+	mgmtPrefix      = "prefix"
+	mgmtClientToken = "clientToken"
+	mgmtForceStart  = "forceStart"
+	mgmtForceStop   = "forceStop"
 )

 func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (us madmin.ServerUpdateStatus, err error) {
@@ -298,6 +293,20 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
 	// ignores any errors here.
 	storageInfo, _ := objectAPI.StorageInfo(ctx, false)

+	// Collect any disk healing.
+	healing, _ := getAggregatedBackgroundHealState(ctx)
+	healDisks := make(map[string]struct{}, len(healing.HealDisks))
+	for _, disk := range healing.HealDisks {
+		healDisks[disk] = struct{}{}
+	}
+
+	// find all disks which belong to each respective endpoints
+	for i, disk := range storageInfo.Disks {
+		if _, ok := healDisks[disk.Endpoint]; ok {
+			storageInfo.Disks[i].Healing = true
+		}
+	}
+
 	// Marshal API response
 	jsonBytes, err := json.Marshal(storageInfo)
 	if err != nil {
@@ -339,23 +348,26 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re
 	writeSuccessResponseJSON(w, dataUsageInfoJSON)
 }

-func lriToLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry {
+func lriToLockEntry(l lockRequesterInfo, resource, server string, rquorum, wquorum int) *madmin.LockEntry {
 	entry := &madmin.LockEntry{
 		Timestamp:  l.Timestamp,
 		Resource:   resource,
 		ServerList: []string{server},
 		Source:     l.Source,
 		Owner:      l.Owner,
 		ID:         l.UID,
 	}
 	if l.Writer {
 		entry.Type = "WRITE"
+		entry.Quorum = wquorum
 	} else {
 		entry.Type = "READ"
+		entry.Quorum = rquorum
 	}
 	return entry
 }

-func topLockEntries(peerLocks []*PeerLocks, count int) madmin.LockEntries {
+func topLockEntries(peerLocks []*PeerLocks, rquorum, wquorum int, stale bool) madmin.LockEntries {
 	entryMap := make(map[string]*madmin.LockEntry)
 	for _, peerLock := range peerLocks {
 		if peerLock == nil {
@@ -367,20 +379,23 @@ func topLockEntries(peerLocks []*PeerLocks, count int) madmin.LockEntries {
 			if val, ok := entryMap[lockReqInfo.UID]; ok {
 				val.ServerList = append(val.ServerList, peerLock.Addr)
 			} else {
-				entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
+				entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr, rquorum, wquorum)
 			}
 		}
 	}
-	var lockEntries = make(madmin.LockEntries, 0, len(entryMap))
+	var lockEntries madmin.LockEntries
 	for _, v := range entryMap {
-		lockEntries = append(lockEntries, *v)
+		if stale {
+			lockEntries = append(lockEntries, *v)
+			continue
+		}
+		if len(v.ServerList) >= v.Quorum {
+			lockEntries = append(lockEntries, *v)
+		}
 	}
 	sort.Sort(lockEntries)
-	if len(lockEntries) > count {
-		lockEntries = lockEntries[:count]
-	}
 	return lockEntries
 }
```
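The reworked `topLockEntries` drops a lock from the listing unless it has been acknowledged by at least a read or write quorum of servers, or unless stale entries were explicitly requested. The filtering rule in isolation, as a self-contained sketch (simplified types, not the MinIO originals):

```go
package main

import "fmt"

// lockEntry is a simplified stand-in for madmin.LockEntry.
type lockEntry struct {
	ID         string
	ServerList []string // servers that still hold this lock
	Quorum     int      // read or write quorum required for the lock
}

// filterLocks keeps entries seen on at least Quorum servers; with
// stale=true every entry is kept, which is how stale locks are surfaced.
func filterLocks(entries []lockEntry, stale bool) []lockEntry {
	var out []lockEntry
	for _, e := range entries {
		if stale || len(e.ServerList) >= e.Quorum {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	entries := []lockEntry{
		{ID: "a", ServerList: []string{"s1", "s2", "s3"}, Quorum: 3}, // healthy write lock
		{ID: "b", ServerList: []string{"s1"}, Quorum: 2},             // stale: lost its quorum
	}
	fmt.Println(len(filterLocks(entries, false))) // 1
	fmt.Println(len(filterLocks(entries, true)))  // 2
}
```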
```diff
@@ -410,23 +425,20 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
 			return
 		}
 	}
+	stale := r.URL.Query().Get("stale") == "true" // list also stale locks

-	peerLocks := globalNotificationSys.GetLocks(ctx)
-	// Once we have received all the locks currently used from peers
-	// add the local peer locks list as well.
-	var getRespLocks GetLocksResp
-	for _, llocker := range globalLockServers {
-		getRespLocks = append(getRespLocks, llocker.DupLockMap())
-	}
-
-	peerLocks = append(peerLocks, &PeerLocks{
-		Addr:  getHostName(r),
-		Locks: getRespLocks,
-	})
-
-	topLocks := topLockEntries(peerLocks, count)
+	peerLocks := globalNotificationSys.GetLocks(ctx, r)

-	// Marshal API response
+	rquorum := getReadQuorum(objectAPI.SetDriveCount())
+	wquorum := getWriteQuorum(objectAPI.SetDriveCount())
+
+	topLocks := topLockEntries(peerLocks, rquorum, wquorum, stale)
+
+	// Marshal API response upto requested count.
+	if len(topLocks) > count && count > 0 {
+		topLocks = topLocks[:count]
+	}
+
 	jsonBytes, err := json.Marshal(topLocks)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -572,8 +584,8 @@ type healInitParams struct {

 // extractHealInitParams - Validates params for heal init API.
 func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
-	hip.bucket = vars[string(mgmtBucket)]
-	hip.objPrefix = vars[string(mgmtPrefix)]
+	hip.bucket = vars[mgmtBucket]
+	hip.objPrefix = vars[mgmtPrefix]

 	if hip.bucket == "" {
 		if hip.objPrefix != "" {
@@ -592,13 +604,13 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
 		return
 	}

-	if len(qParms[string(mgmtClientToken)]) > 0 {
-		hip.clientToken = qParms[string(mgmtClientToken)][0]
+	if len(qParms[mgmtClientToken]) > 0 {
+		hip.clientToken = qParms[mgmtClientToken][0]
 	}
-	if _, ok := qParms[string(mgmtForceStart)]; ok {
+	if _, ok := qParms[mgmtForceStart]; ok {
 		hip.forceStart = true
 	}
-	if _, ok := qParms[string(mgmtForceStop)]; ok {
+	if _, ok := qParms[mgmtForceStop]; ok {
 		hip.forceStop = true
 	}
```
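Dropping the `mgmtQueryKey` string type lets these constants index `map[string]string` and `url.Values` directly, with no `string(...)` conversion at every call site. A small sketch of the difference (standalone, illustrative names):

```go
package main

import "fmt"

type queryKey string // a typed key, like the removed mgmtQueryKey

const (
	typedBucket   queryKey = "bucket" // needs string(typedBucket) to index a map[string]string
	untypedBucket          = "bucket" // untyped constant: indexes map[string]string directly
)

func main() {
	vars := map[string]string{"bucket": "photos"}

	// With the typed constant, a conversion is required
	// (vars[typedBucket] would not compile):
	fmt.Println(vars[string(typedBucket)]) // photos

	// The untyped constant converts implicitly, which is why the
	// diff above could delete all the string(...) wrappers:
	fmt.Println(vars[untypedBucket]) // photos
}
```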
```diff
@@ -799,6 +811,59 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
 	keepConnLive(w, r, respCh)
 }

+func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
+	var bgHealStates []madmin.BgHealState
+
+	localHealState, ok := getLocalBackgroundHealStatus()
+	if !ok {
+		return madmin.BgHealState{}, errServerNotInitialized
+	}
+
+	// Get local heal status first
+	bgHealStates = append(bgHealStates, localHealState)
+
+	if globalIsDistErasure {
+		// Get heal status from other peers
+		peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
+		var errCount int
+		for _, nerr := range nerrs {
+			if nerr.Err != nil {
+				logger.LogIf(ctx, nerr.Err)
+				errCount++
+			}
+		}
+		if errCount == len(nerrs) {
+			return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
+		}
+		bgHealStates = append(bgHealStates, peersHealStates...)
+	}
+
+	// Aggregate healing result
+	var aggregatedHealStateResult = madmin.BgHealState{
+		ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
+		LastHealActivity:  bgHealStates[0].LastHealActivity,
+		NextHealRound:     bgHealStates[0].NextHealRound,
+		HealDisks:         bgHealStates[0].HealDisks,
+	}
+
+	bgHealStates = bgHealStates[1:]
+
+	for _, state := range bgHealStates {
+		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
+		aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
+		if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
+			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
+			// The node which has the last heal activity means its
+			// is the node that is orchestrating self healing operations,
+			// which also means it is the same node which decides when
+			// the next self healing operation will be done.
+			aggregatedHealStateResult.NextHealRound = state.NextHealRound
+		}
+	}
+
+	return aggregatedHealStateResult, nil
+}
+
 func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "HealBackgroundStatus")
@@ -815,39 +880,13 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
 		return
 	}

-	var bgHealStates []madmin.BgHealState
-
-	// Get local heal status first
-	bgHealStates = append(bgHealStates, getLocalBackgroundHealStatus())
-
-	if globalIsDistErasure {
-		// Get heal status from other peers
-		peersHealStates := globalNotificationSys.BackgroundHealStatus()
-		bgHealStates = append(bgHealStates, peersHealStates...)
-	}
-
-	// Aggregate healing result
-	var aggregatedHealStateResult = madmin.BgHealState{
-		ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
-		LastHealActivity:  bgHealStates[0].LastHealActivity,
-		NextHealRound:     bgHealStates[0].NextHealRound,
-	}
-
-	bgHealStates = bgHealStates[1:]
-
-	for _, state := range bgHealStates {
-		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
-		if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
-			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
-			// The node which has the last heal activity means its
-			// is the node that is orchestrating self healing operations,
-			// which also means it is the same node which decides when
-			// the next self healing operation will be done.
-			aggregatedHealStateResult.NextHealRound = state.NextHealRound
-		}
-	}
-
-	if err := json.NewEncoder(w).Encode(aggregatedHealStateResult); err != nil {
+	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	if err := json.NewEncoder(w).Encode(aggregateHealStateResult); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
```
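`getAggregatedBackgroundHealState` tolerates individual peer failures and reports the cluster unhealthy only when every peer fails to answer. That all-or-nothing error rule in isolation, as a sketch with simplified types:

```go
package main

import (
	"errors"
	"fmt"
)

// peerReply is a simplified stand-in for a peer's heal-status response.
type peerReply struct {
	Scanned int64
	Err     error
}

// aggregateScanned sums the scanned-items counters from all reachable
// peers, but fails outright only when no peer answered at all.
func aggregateScanned(local int64, replies []peerReply) (int64, error) {
	total := local
	errCount := 0
	for _, r := range replies {
		if r.Err != nil {
			errCount++
			continue
		}
		total += r.Scanned
	}
	if len(replies) > 0 && errCount == len(replies) {
		return 0, errors.New("all remote servers failed to report heal status, cluster is unhealthy")
	}
	return total, nil
}

func main() {
	replies := []peerReply{{Scanned: 10}, {Err: errors.New("down")}}
	total, err := aggregateScanned(5, replies)
	fmt.Println(total, err) // 15 <nil>
}
```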
```diff
@@ -859,7 +898,7 @@ func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Reques
 	var cred auth.Credentials
 	var adminAPIErr APIErrorCode
 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return nil, cred
@@ -1063,7 +1102,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
 	// Avoid reusing tcp connection if read timeout is hit
 	// This is needed to make r.Context().Done() work as
 	// expected in case of read timeout
-	w.Header().Add("Connection", "close")
+	w.Header().Set("Connection", "close")

 	setEventStreamHeaders(w)
@@ -1214,7 +1253,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	vars := mux.Vars(r)
+	query := r.URL.Query()
 	obdInfo := madmin.OBDInfo{}
 	obdInfoCh := make(chan madmin.OBDInfo)
@@ -1248,10 +1287,9 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 	}

 	deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
-
 	defer cancel()

-	nsLock := objectAPI.NewNSLock(deadlinedCtx, minioMetaBucket, "obd-in-progress")
+	nsLock := objectAPI.NewNSLock(ctx, minioMetaBucket, "obd-in-progress")
 	if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
 		errResp(err)
 		return
@@ -1261,7 +1299,13 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 	go func() {
 		defer close(obdInfoCh)

-		if cpu, ok := vars["syscpu"]; ok && cpu == "true" {
+		if log := query.Get("log"); log == "true" {
+			obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, getLocalLogOBD(deadlinedCtx, r))
+			obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, globalNotificationSys.LogOBDInfo(deadlinedCtx)...)
+			partialWrite(obdInfo)
+		}
+
+		if cpu := query.Get("syscpu"); cpu == "true" {
 			cpuInfo := getLocalCPUOBDInfo(deadlinedCtx, r)

 			obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
@@ -1269,7 +1313,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if diskHw, ok := vars["sysdiskhw"]; ok && diskHw == "true" {
+		if diskHw := query.Get("sysdiskhw"); diskHw == "true" {
 			diskHwInfo := getLocalDiskHwOBD(deadlinedCtx, r)

 			obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
@@ -1277,7 +1321,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if osInfo, ok := vars["sysosinfo"]; ok && osInfo == "true" {
+		if osInfo := query.Get("sysosinfo"); osInfo == "true" {
 			osInfo := getLocalOsInfoOBD(deadlinedCtx, r)

 			obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
@@ -1285,7 +1329,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if mem, ok := vars["sysmem"]; ok && mem == "true" {
+		if mem := query.Get("sysmem"); mem == "true" {
 			memInfo := getLocalMemOBD(deadlinedCtx, r)

 			obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
@@ -1293,7 +1337,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if proc, ok := vars["sysprocess"]; ok && proc == "true" {
+		if proc := query.Get("sysprocess"); proc == "true" {
 			procInfo := getLocalProcOBD(deadlinedCtx, r)

 			obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
@@ -1301,14 +1345,14 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if config, ok := vars["minioconfig"]; ok && config == "true" {
+		if config := query.Get("minioconfig"); config == "true" {
 			cfg, err := readServerConfig(ctx, objectAPI)
 			logger.LogIf(ctx, err)
 			obdInfo.Minio.Config = cfg
 			partialWrite(obdInfo)
 		}

-		if drive, ok := vars["perfdrive"]; ok && drive == "true" {
+		if drive := query.Get("perfdrive"); drive == "true" {
 			// Get drive obd details from local server's drive(s)
 			driveOBDSerial := getLocalDrivesOBD(deadlinedCtx, false, globalEndpoints, r)
 			driveOBDParallel := getLocalDrivesOBD(deadlinedCtx, true, globalEndpoints, r)
@@ -1339,7 +1383,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure {
+		if net := query.Get("perfnet"); net == "true" && globalIsDistErasure {
 			obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
 			partialWrite(obdInfo)
@@ -1353,6 +1397,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			obdInfo.Perf.NetParallel = globalNotificationSys.NetOBDParallelInfo(deadlinedCtx)
 			partialWrite(obdInfo)
 		}
+
 	}()

 	ticker := time.NewTicker(30 * time.Second)
@@ -1379,6 +1424,31 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)

 }

+// BandwidthMonitorHandler - GET /minio/admin/v3/bandwidth
+// ----------
+// Get bandwidth consumption information
+func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "BandwidthMonitor")
+
+	// Validate request signature.
+	_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.BandwidthMonitorAction, "")
+	if adminAPIErr != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
+		return
+	}
+
+	setEventStreamHeaders(w)
+	bucketsRequestedString := r.URL.Query().Get("buckets")
+	bucketsRequested := strings.Split(bucketsRequestedString, ",")
+	consolidatedReport := globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
+	enc := json.NewEncoder(w)
+	err := enc.Encode(consolidatedReport)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+	}
+	w.(http.Flusher).Flush()
+}
+
 // ServerInfoHandler - GET /minio/admin/v3/info
 // ----------
 // Get server information
```
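The OBD handler now reads its feature toggles from the URL query string (`r.URL.Query()`) instead of `mux.Vars(r)` route variables, so each probe becomes an optional `?flag=true` parameter. A stripped-down sketch of that pattern (hypothetical flag names, standard library only):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// diagHandler enables individual probes via optional query parameters,
// e.g. GET /diag?syscpu=true&sysmem=true, the way the OBD handler above
// switches on query.Get("syscpu") == "true".
func diagHandler(w http.ResponseWriter, r *http.Request) {
	query := r.URL.Query()

	if query.Get("syscpu") == "true" {
		fmt.Fprintln(w, "cpu: collected")
	}
	if query.Get("sysmem") == "true" {
		fmt.Fprintln(w, "mem: collected")
	}
	// Absent flags simply skip their probe; no route variables needed.
}

func main() {
	http.HandleFunc("/diag", diagHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```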
@@ -1392,12 +1462,6 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
buckets := madmin.Buckets{}
|
||||
objects := madmin.Objects{}
|
||||
usage := madmin.Usage{}
|
||||
@@ -1409,7 +1473,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
|
||||
}
|
||||
|
||||
vault := fetchVaultStatus(cfg)
|
||||
vault := fetchVaultStatus()
|
||||
|
||||
ldap := madmin.LDAP{}
|
||||
if globalLDAPConfig.Enabled {
|
||||
@@ -1425,10 +1489,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
log, audit := fetchLoggerInfo(cfg)
|
||||
log, audit := fetchLoggerInfo()
|
||||
|
||||
// Get the notification target info
|
||||
notifyTarget := fetchLambdaInfo(cfg)
|
||||
notifyTarget := fetchLambdaInfo()
|
||||
|
||||
// Fetching the Storage information, ignore any errors.
|
||||
storageInfo, _ := objectAPI.StorageInfo(ctx, false)
|
||||
@@ -1450,11 +1514,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
mode := "safemode"
|
||||
if newObjectLayerFn() != nil {
|
||||
mode = "online"
|
||||
}
|
||||
|
||||
mode := "online"
|
||||
server := getLocalServerProperty(globalEndpoints, r)
|
||||
servers := globalNotificationSys.ServerInfo()
|
||||
servers = append(servers, server)
|
||||
@@ -1476,8 +1536,12 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add all the disks local to this server.
|
||||
for _, disk := range storageInfo.Disks {
|
||||
if disk.DrivePath == "" && disk.Endpoint == "" {
|
||||
continue
|
||||
}
|
||||
if disk.Endpoint == disk.DrivePath {
|
||||
servers[len(servers)-1].Disks = append(servers[len(servers)-1].Disks, disk)
|
||||
}
|
||||
@@ -1504,27 +1568,33 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
//Reply with storage information (across nodes in a
|
||||
// Reply with storage information (across nodes in a
|
||||
// distributed setup) as json.
|
||||
writeSuccessResponseJSON(w, jsonBytes)
|
||||
}
|
||||
|
||||
func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {

// Fetch the configured targets
tr := NewGatewayHTTPTransport()
defer tr.CloseIdleConnections()
targetList, err := notify.FetchRegisteredTargets(cfg, GlobalContext.Done(), tr, true, false)
if err != nil && err != notify.ErrTargetsOffline {
logger.LogIf(GlobalContext, err)
return nil
}
func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {

lambdaMap := make(map[string][]madmin.TargetIDStatus)

for targetID, target := range targetList.TargetMap() {
for _, tgt := range globalConfigTargetList.Targets() {
targetIDStatus := make(map[string]madmin.Status)
active, _ := target.IsActive()
active, _ := tgt.IsActive()
targetID := tgt.ID()
if active {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
} else {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
}
list := lambdaMap[targetID.Name]
list = append(list, targetIDStatus)
lambdaMap[targetID.Name] = list
}

for _, tgt := range globalEnvTargetList.Targets() {
targetIDStatus := make(map[string]madmin.Status)
active, _ := tgt.IsActive()
targetID := tgt.ID()
if active {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
} else {
@@ -1533,8 +1603,6 @@ func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
list := lambdaMap[targetID.Name]
list = append(list, targetIDStatus)
lambdaMap[targetID.Name] = list
// Close any leaking connections
_ = target.Close()
}

notify := make([]map[string][]madmin.TargetIDStatus, len(lambdaMap))
@@ -1549,7 +1617,7 @@ func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
}

// fetchVaultStatus fetches Vault Info
func fetchVaultStatus(cfg config.Config) madmin.Vault {
func fetchVaultStatus() madmin.Vault {
vault := madmin.Vault{}
if GlobalKMS == nil {
vault.Status = "disabled"
@@ -1558,12 +1626,12 @@ func fetchVaultStatus(cfg config.Config) madmin.Vault {
keyID := GlobalKMS.DefaultKeyID()
kmsInfo := GlobalKMS.Info()

if kmsInfo.Endpoint == "" {
if len(kmsInfo.Endpoints) == 0 {
vault.Status = "KMS configured using master key"
return vault
}

if err := checkConnection(kmsInfo.Endpoint, 15*time.Second); err != nil {
if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
vault.Status = "offline"
} else {
vault.Status = "online"
@@ -1592,41 +1660,42 @@ func fetchVaultStatus(cfg config.Config) madmin.Vault {
}

// fetchLoggerDetails return log info
func fetchLoggerInfo(cfg config.Config) ([]madmin.Logger, []madmin.Audit) {
loggerCfg, _ := logger.LookupConfig(cfg)

var logger []madmin.Logger
var auditlogger []madmin.Audit
for log, l := range loggerCfg.HTTP {
if l.Enabled {
err := checkConnection(l.Endpoint, 15*time.Second)
func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
var loggerInfo []madmin.Logger
var auditloggerInfo []madmin.Audit
for _, target := range logger.Targets {
if target.Endpoint() != "" {
tgt := target.String()
err := checkConnection(target.Endpoint(), 15*time.Second)
if err == nil {
mapLog := make(map[string]madmin.Status)
mapLog[log] = madmin.Status{Status: "Online"}
logger = append(logger, mapLog)
mapLog[tgt] = madmin.Status{Status: "Online"}
loggerInfo = append(loggerInfo, mapLog)
} else {
mapLog := make(map[string]madmin.Status)
mapLog[log] = madmin.Status{Status: "offline"}
logger = append(logger, mapLog)
mapLog[tgt] = madmin.Status{Status: "offline"}
loggerInfo = append(loggerInfo, mapLog)
}
}
}

for audit, l := range loggerCfg.Audit {
if l.Enabled {
err := checkConnection(l.Endpoint, 15*time.Second)
for _, target := range logger.AuditTargets {
if target.Endpoint() != "" {
tgt := target.String()
err := checkConnection(target.Endpoint(), 15*time.Second)
if err == nil {
mapAudit := make(map[string]madmin.Status)
mapAudit[audit] = madmin.Status{Status: "Online"}
auditlogger = append(auditlogger, mapAudit)
mapAudit[tgt] = madmin.Status{Status: "Online"}
auditloggerInfo = append(auditloggerInfo, mapAudit)
} else {
mapAudit := make(map[string]madmin.Status)
mapAudit[audit] = madmin.Status{Status: "Offline"}
auditlogger = append(auditlogger, mapAudit)
mapAudit[tgt] = madmin.Status{Status: "Offline"}
auditloggerInfo = append(auditloggerInfo, mapAudit)
}
}
}
return logger, auditlogger

return loggerInfo, auditloggerInfo
}

// checkConnection - ping an endpoint, return err in case of no connection
@@ -1634,11 +1703,6 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(GlobalContext, timeout)
defer cancel()

req, err := http.NewRequest(http.MethodHead, endpointStr, nil)
if err != nil {
return err
}

client := &http.Client{Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: xhttp.NewCustomDialContext(timeout),
@@ -1653,11 +1717,15 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
}}
defer client.CloseIdleConnections()

resp, err := client.Do(req.WithContext(ctx))
req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
if err != nil {
return err
}

resp, err := client.Do(req)
if err != nil {
return err
}
defer xhttp.DrainBody(resp.Body)
resp.Body.Close()
return nil
}

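The rewrite above swaps `req.WithContext(ctx)` for `http.NewRequestWithContext`, so the request is bound to the timeout context from creation onward. A minimal standalone sketch of the same pattern (endpoint value and the use of `http.DefaultClient` are illustrative, not MinIO's configured transport):

```go
package main

import (
	"context"
	"net/http"
	"time"
)

// ping issues a HEAD request whose whole lifetime, including dialing,
// is bounded by the context deadline.
func ping(endpoint string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpoint, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused.
	return resp.Body.Close()
}

func main() { _ = ping("http://localhost:9000", 15*time.Second) }
```
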
@@ -327,13 +327,13 @@ func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(string(mgmtClientToken), clientToken)
v.Add(mgmtClientToken, clientToken)
}
if forceStart {
v.Add(string(mgmtForceStart), "")
v.Add(mgmtForceStart, "")
}
if forceStop {
v.Add(string(mgmtForceStop), "")
v.Add(mgmtForceStop, "")
}
return v
}
@@ -351,11 +351,11 @@ func TestExtractHealInitParams(t *testing.T) {
}
varsArr := []map[string]string{
// Invalid cases
{string(mgmtPrefix): "objprefix"},
{mgmtPrefix: "objprefix"},
// Valid cases
{},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
}

// Body is always valid - we do not test JSON decoding.

@@ -85,16 +85,18 @@ type healSequenceStatus struct {

// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.Mutex
sync.RWMutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence
healLocalDisks map[Endpoint]struct{}
}

// newHealState - initialize global heal state management
func newHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
}

go healState.periodicHealSeqsClean(GlobalContext)
@@ -102,6 +104,42 @@ func newHealState() *allHealState {
return healState
}

func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()

return len(ahs.healLocalDisks)
}

func (ahs *allHealState) getHealLocalDisks() Endpoints {
ahs.RLock()
defer ahs.RUnlock()

var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
}
return endpoints
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}

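The new `healLocalDisks` field turns `allHealState` into a small thread-safe set: `map[Endpoint]struct{}` stores membership without payload, readers take the shared lock, and the push/pop mutators take the exclusive one. A simplified standalone analogue of the same pattern (with `string` standing in for `Endpoint`):

```go
package main

import (
	"fmt"
	"sync"
)

// endpointSet mirrors the map-as-set plus embedded RWMutex shape above;
// concurrent readers (count, snapshot) do not block each other.
type endpointSet struct {
	sync.RWMutex
	members map[string]struct{}
}

func (s *endpointSet) push(eps ...string) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		s.members[ep] = struct{}{}
	}
}

func (s *endpointSet) pop(eps ...string) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		delete(s.members, ep)
	}
}

func (s *endpointSet) count() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.members)
}

func main() {
	s := &endpointSet{members: map[string]struct{}{}}
	s.push("/disk1", "/disk2")
	s.pop("/disk1")
	fmt.Println(s.count()) // 1
}
```
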
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
@@ -485,9 +523,12 @@ func (h *healSequence) isQuitting() bool {
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
h.mutex.RUnlock()
return ended
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
}

// stops the heal sequence - safe to call multiple times.
@@ -627,6 +668,12 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if source.opts != nil {
task.opts = *source.opts
}

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)

select {
@@ -634,9 +681,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if !h.reportProgress {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and
// return success.
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
// Return the error so that caller can handle it.
return res.err
}

h.mutex.Lock()
@@ -700,14 +749,13 @@ func (h *healSequence) healItemsFromSourceCh() error {
if err := h.queueHealTask(source, itemType); err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}

h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
@@ -760,7 +808,7 @@ func (h *healSequence) traverseAndHeal() {
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -773,17 +821,17 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return errHealStopSignalled
}

herr := h.queueHealTask(healSource{
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if isErrObjectNotFound(herr) || isErrVersionNotFound(herr) {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return herr
return err
})
}
}
@@ -796,7 +844,7 @@ func (h *healSequence) healDiskFormat() error {
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -816,7 +864,7 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -838,13 +886,15 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}

if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
return err
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

if bucketsOnly {
@@ -855,9 +905,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
oi, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, ""); err != nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -867,7 +920,11 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
}

if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}
@@ -875,7 +932,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -884,9 +941,10 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
return errHealStopSignalled
}

return h.queueHealTask(healSource{
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
}

@@ -180,19 +180,19 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}
}
// -- Top APIs --
// Top locks
if globalIsDistErasure {
@@ -212,22 +212,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
}
}

// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(errorResponseHandler)
}

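The rewritten OBD route drops its long list of required `.Queries(...)` matchers, so the flags become optional and the handler reads them from the URL itself. A minimal gorilla/mux sketch of that pattern (route path and parameter name are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// No .Queries(...) matcher: the route matches with or without the
	// flag, and the handler inspects the query string directly.
	r.Methods(http.MethodGet).Path("/obdinfo").HandlerFunc(
		func(w http.ResponseWriter, req *http.Request) {
			perfdrive := req.URL.Query().Get("perfdrive") == "true"
			fmt.Fprintf(w, "perfdrive=%v\n", perfdrive)
		})
	http.ListenAndServe(":8080", r)
}
```
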
@@ -29,7 +29,7 @@ import (

minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
@@ -106,15 +106,17 @@ const (
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrReplicationDestinationNotFoundError
ErrRemoteDestinationNotFoundError
ErrReplicationDestinationMissingLock
ErrReplicationTargetNotFoundError
ErrRemoteTargetNotFoundError
ErrReplicationRemoteConnectionError
ErrBucketRemoteIdenticalToSource
ErrBucketRemoteAlreadyExists
ErrBucketRemoteLabelInUse
ErrBucketRemoteArnTypeInvalid
ErrBucketRemoteArnInvalid
ErrBucketRemoteRemoveDisallowed
ErrReplicationTargetNotVersionedError
ErrRemoteTargetNotVersionedError
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
@@ -663,20 +665,9 @@ var errorCodes = errorCodeMap{
Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialDate: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialRegion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; the region is wrong;",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRegion: {
@@ -684,9 +675,6 @@ var errorCodes = errorCodeMap{
Description: "Region does not match.",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
@@ -697,9 +685,6 @@ var errorCodes = errorCodeMap{
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
// Need changes to make sure variable messages can be constructed.
ErrInvalidRequestVersion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",
@@ -770,8 +755,6 @@ var errorCodes = errorCodeMap{
Description: "Your key is too long",
HTTPStatusCode: http.StatusBadRequest,
},

// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
ErrUnsignedHeaders: {
Code: "AccessDenied",
Description: "There were headers present in the request which were not signed",
@@ -827,9 +810,9 @@ var errorCodes = errorCodeMap{
Description: "The replication configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationNotFoundError: {
Code: "ReplicationDestinationNotFoundError",
Description: "The replication destination bucket does not exist",
ErrRemoteDestinationNotFoundError: {
Code: "RemoteDestinationNotFoundError",
Description: "The remote destination bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationMissingLock: {
@@ -837,24 +820,34 @@ var errorCodes = errorCodeMap{
Description: "The replication destination bucket does not have object locking enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationTargetNotFoundError: {
Code: "XminioAdminReplicationTargetNotFoundError",
Description: "The replication target does not exist",
ErrRemoteTargetNotFoundError: {
Code: "XMinioAdminRemoteTargetNotFoundError",
Description: "The remote target does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service endpoint or target bucket not available",
HTTPStatusCode: http.StatusNotFound,
},
ErrBucketRemoteIdenticalToSource: {
Code: "XminioAdminRemoteIdenticalToSource",
Code: "XMinioAdminRemoteIdenticalToSource",
Description: "The remote target cannot be identical to source",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteAlreadyExists: {
Code: "XminioAdminBucketRemoteAlreadyExists",
Code: "XMinioAdminBucketRemoteAlreadyExists",
Description: "The remote target already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteLabelInUse: {
Code: "XMinioAdminBucketRemoteLabelInUse",
Description: "The remote target with this label already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteRemoveDisallowed: {
Code: "XMinioAdminRemoteRemoveDisallowed",
Description: "Replication configuration exists with this ARN.",
Description: "This ARN is in use by an existing configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteArnTypeInvalid: {
@@ -867,9 +860,9 @@ var errorCodes = errorCodeMap{
Description: "The bucket remote ARN does not have correct format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationTargetNotVersionedError: {
Code: "ReplicationTargetNotVersionedError",
Description: "The replication target does not have versioning enabled",
ErrRemoteTargetNotVersionedError: {
Code: "RemoteTargetNotVersionedError",
Description: "The remote target does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationSourceNotVersionedError: {
@@ -984,7 +977,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrMetadataTooLarge: {
Code: "InvalidArgument",
Code: "MetadataTooLarge",
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -1919,22 +1912,26 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketReplicationConfigNotFound:
apiErr = ErrReplicationConfigurationNotFoundError
case BucketReplicationDestinationNotFound:
apiErr = ErrReplicationDestinationNotFoundError
case BucketRemoteDestinationNotFound:
apiErr = ErrRemoteDestinationNotFoundError
case BucketReplicationDestinationMissingLock:
apiErr = ErrReplicationDestinationMissingLock
case BucketRemoteTargetNotFound:
apiErr = ErrReplicationTargetNotFoundError
apiErr = ErrRemoteTargetNotFoundError
case BucketRemoteConnectionErr:
apiErr = ErrReplicationRemoteConnectionError
case BucketRemoteAlreadyExists:
apiErr = ErrBucketRemoteAlreadyExists
case BucketRemoteLabelInUse:
apiErr = ErrBucketRemoteLabelInUse
case BucketRemoteArnTypeInvalid:
apiErr = ErrBucketRemoteArnTypeInvalid
case BucketRemoteArnInvalid:
apiErr = ErrBucketRemoteArnInvalid
case BucketRemoteRemoveDisallowed:
apiErr = ErrBucketRemoteRemoveDisallowed
case BucketReplicationTargetNotVersioned:
apiErr = ErrReplicationTargetNotVersionedError
case BucketRemoteTargetNotVersioned:
apiErr = ErrRemoteTargetNotVersionedError
case BucketReplicationSourceNotVersioned:
apiErr = ErrReplicationSourceNotVersionedError
case BucketQuotaExceeded:
@@ -1967,6 +1964,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrBackendDown
case ObjectNameTooLong:
apiErr = ErrKeyTooLongError
case dns.ErrInvalidBucketName:
apiErr = ErrInvalidBucketName
default:
var ie, iw int
// This work-around is to handle the issue golang/go#30648
@@ -2003,6 +2002,12 @@ func toAPIError(ctx context.Context, err error) APIError {
}

var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)
apiErr = errorCodes.ToAPIErrWithErr(code, e)
}

if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on

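The `toAPIError` hunk above special-cases the typed `dns.ErrInvalidBucketName` so the concrete error text can be folded into the response. The general Go pattern, reduced to a standalone sketch with illustrative types (the real code uses a type assertion and `ToAPIErrWithErr` from this file):

```go
package main

import (
	"errors"
	"fmt"
)

// InvalidBucketName is an illustrative typed error standing in for
// dns.ErrInvalidBucketName.
type InvalidBucketName struct{ Bucket string }

func (e InvalidBucketName) Error() string {
	return fmt.Sprintf("invalid bucket name %q", e.Bucket)
}

// classify maps an error to an API error code, letting the typed error
// override the generic fallback.
func classify(err error) string {
	var e InvalidBucketName
	if errors.As(err, &e) {
		return "InvalidBucketName: " + e.Error()
	}
	return "InternalError"
}

func main() {
	fmt.Println(classify(InvalidBucketName{Bucket: "Bad_Bucket!"}))
}
```
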
@@ -84,7 +84,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)

@@ -128,6 +128,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// values to client.
continue
}

// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) {
@@ -142,15 +147,26 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}
}

var start, rangeLen int64
totalObjectSize, err := objInfo.GetActualSize()
if err != nil {
return err
}

// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
if opts.PartNumber > 0 {
var start, end int64
for i := 0; i < len(objInfo.Parts) && i < opts.PartNumber; i++ {
start = end
end = start + objInfo.Parts[i].ActualSize - 1
}
rs = &HTTPRangeSpec{Start: start, End: end}
rangeLen = end - start + 1
} else {
// for providing ranged content
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
}

// Set content length.

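The new `opts.PartNumber` branch derives a byte range by accumulating part sizes. The same arithmetic in isolation, as a sketch rather than the exact upstream loop (plain int64 sizes stand in for `objInfo.Parts`):

```go
package main

import "fmt"

// partRange returns the inclusive byte range covered by part partNumber
// (1-based), given the actual sizes of all parts in order.
func partRange(partSizes []int64, partNumber int) (start, end int64) {
	for i := 0; i < len(partSizes) && i < partNumber; i++ {
		start = end // previous cumulative size becomes the new start
		end = start + partSizes[i]
	}
	return start, end - 1 // convert exclusive end to inclusive offset
}

func main() {
	sizes := []int64{5 << 20, 5 << 20, 3 << 20}
	s, e := partRange(sizes, 2)
	fmt.Println(s, e) // 5242880 10485759
}
```
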
@@ -20,6 +20,7 @@ import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"path"
@@ -35,7 +36,7 @@ import (
const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
maxObjectList = 10000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
@@ -407,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var listbuckets []Bucket
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}

@@ -427,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var data = ListVersionsResponse{}

@@ -472,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
data.VersionIDMarker = versionIDMarker
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -483,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var data = ListObjectsResponse{}

@@ -516,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys

data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -530,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}

@@ -564,6 +564,10 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
}
}
@@ -580,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated

commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
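Several response builders above switch from a nil slice plus `append` to `make([]T, 0, n)`. With the final length known, this avoids growth re-allocations, and an empty-but-non-nil slice also marshals differently in JSON: `[]` instead of `null`. A tiny demonstration of that second point:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []string
	emptySlice := make([]string, 0, 4) // capacity known up front

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}
```
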
@@ -697,10 +703,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
if newObjectLayerFn() == nil {
// Server still in safe mode.
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
}
setCommonHeaders(w)
if mType != mimeNone {
w.Header().Set(xhttp.ContentType, string(mType))
@@ -759,14 +761,14 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
case "AccessDenied":
// The request is from browser and also if browser
// is enabled we need to redirect.
if browser && globalBrowserEnabled {
if newObjectLayerFn() == nil {
// server still in safe mode.
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
}
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
@@ -817,38 +819,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {

switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// The request is from browser and also if browser
// is enabled we need to redirect.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}

reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}

encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}

@@ -17,6 +17,7 @@
package cmd

import (
"net"
"net/http"

"github.com/gorilla/mux"
@@ -31,28 +32,15 @@ func newHTTPServerFn() *xhttp.Server {
return globalHTTPServer
}

func newObjectLayerWithoutSafeModeFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalObjectAPI
}

func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalObjectAPI
}

func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()

if globalSafeMode {
return nil
}
return globalCacheObjectAPI
}

@@ -60,31 +48,53 @@ func newCachedObjectLayerFn() CacheObjectLayer {
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}

// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}

// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}

// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()

var routers []*mux.Router
for _, domainName := range globalDomainNames {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching minio.<domain>; this is
// specifically meant for operator/k8s deployment
// The reason we need to skip this is for a special
// usecase where we need to make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by the bucketDNS style to ensure that path style
// is available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// makes sure that buckets are routed through this matcher
// to match for `<bucket>`
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

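On Kubernetes the bucket-DNS subrouter is wrapped in a MatcherFunc so the reserved minio.<domain> host keeps path-style routing while every other `<bucket>.<domain>` host is treated as a bucket vhost. A minimal sketch of such a host-filtering matcher (the domain value is illustrative, and note that req.Host may still carry a port in real traffic):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	const domain = "example.svc.cluster.local"
	r := mux.NewRouter()
	// Let the reserved "minio.<domain>" host fall through to
	// path-style routes instead of bucket-DNS routing.
	sub := r.MatcherFunc(func(req *http.Request, _ *mux.RouteMatch) bool {
		return req.Host != "minio."+domain
	}).Host("{bucket:.+}." + domain).Subrouter()
	sub.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "bucket vhost: %s\n", mux.Vars(req)["bucket"])
	})
	http.ListenAndServe(":8080", r)
}
```
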
@@ -94,7 +104,10 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
@@ -138,7 +151,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
@@ -305,8 +319,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))

// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler))

}

@@ -334,7 +334,8 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}

if cred.AccessKey == "" {
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
// Anonymous checks are not meant for ListBuckets action
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: action,
@@ -346,8 +347,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -361,6 +380,23 @@
return cred.AccessKey, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(policy.ListBucketAction),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
}

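Both the anonymous-policy and IAM branches above add the same fallback: AWS S3 treats s3:ListBucket as also granting s3:ListBucketVersions, so a denial of the versions action is retried as the plain list action before giving up. The shape of that check, reduced to a self-contained sketch (the `isAllowed` callback stands in for the policy engines):

```go
package main

import "fmt"

// allowed retries a denied s3:ListBucketVersions request with the action
// AWS S3 treats as equivalent, mirroring the fallback logic above.
func allowed(isAllowed func(action string) bool, action string) bool {
	if isAllowed(action) {
		return true
	}
	// In AWS S3 the s3:ListBucket permission also covers
	// s3:ListBucketVersions, so verify it as a fallback.
	if action == "s3:ListBucketVersions" {
		return isAllowed("s3:ListBucket")
	}
	return false
}

func main() {
	grantsListOnly := func(a string) bool { return a == "s3:ListBucket" }
	fmt.Println(allowed(grantsListOnly, "s3:ListBucketVersions")) // true
}
```
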
@@ -21,7 +21,6 @@ import (
"path"
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)

@@ -55,19 +54,20 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}

func waitForLowHTTPReq(tolerance int32) {
func waitForLowHTTPReq(tolerance int32, maxWait time.Duration) {
const wait = 10 * time.Millisecond
waitCount := maxWait / wait

// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
tolerance += int32(globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers())

if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at max 10 minute for an inprogress request before proceeding to heal
waitCount := 600
// Any requests in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
time.Sleep(wait)
}
}
}
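waitForLowHTTPReq now takes the total delay budget explicitly: with `wait = 10ms`, a `maxWait` of `time.Second` yields `waitCount = 100` fine-grained polls, replacing the old fixed 600 one-second sleeps. The bounded-poll shape in isolation, as a sketch:

```go
package main

import (
	"fmt"
	"time"
)

// waitUntil polls cond every 10ms and gives up once maxWait has elapsed,
// mirroring the waitCount := maxWait / wait accounting above.
func waitUntil(cond func() bool, maxWait time.Duration) bool {
	const wait = 10 * time.Millisecond
	for waitCount := maxWait / wait; waitCount > 0; waitCount-- {
		if cond() {
			return true
		}
		time.Sleep(wait)
	}
	return cond()
}

func main() {
	start := time.Now()
	done := func() bool { return time.Since(start) > 50*time.Millisecond }
	fmt.Println(waitUntil(done, time.Second)) // true, after roughly 60ms
}
```
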
@@ -82,7 +82,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)

var res madmin.HealResultItem
var err error
@@ -100,6 +100,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
ObjectPathUpdated(path.Join(task.bucket, task.object))
}
task.responseCh <- healResult{result: res, err: err}

case <-h.doneCh:
return
case <-ctx.Done():
@@ -116,24 +117,6 @@ func newHealRoutine() *healRoutine {

}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)

nh := newBgHealSequence()
// Heal any disk format and metadata early, if possible.
if err := nh.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(nh.ctx, err)
}
}

globalBackgroundHealState.LaunchNewHealSequence(nh)
}

// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
@@ -145,24 +128,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
return madmin.HealResultItem{}, err
}

// Healing succeeded notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
// Notify servers in background and retry if needed.
go func() {
retry:
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
if nerr.Err.Error() == errServerNotInitialized.Error() {
time.Sleep(time.Second)
goto retry
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}()
}

return res, nil
}

@@ -18,27 +18,34 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
const defaultMonitorNewDiskInterval = time.Minute * 3
|
||||
const (
|
||||
defaultMonitorNewDiskInterval = time.Second * 10
|
||||
healingTrackerFilename = ".healing.bin"
|
||||
)
|
||||
|
||||
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
go monitorLocalDisksAndHeal(ctx, objAPI)
|
||||
//go:generate msgp -file $GOFILE -unexported
|
||||
type healingTracker struct {
|
||||
ID string
|
||||
|
||||
// future add more tracking capabilities
|
||||
}
|
||||
|
||||
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureZones)
if !ok {
return
}

initBackgroundHealing(ctx, objAPI) // start quick background healing

var bgSeq *healSequence
var found bool

@@ -50,82 +57,129 @@ func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
time.Sleep(time.Second)
}

globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
if err := bgSeq.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
}

go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
}
return disksToHeal

}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)

globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence())
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healSequence) {
// Perform automatic disk healing when a disk is replaced locally.
for {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)

var erasureSetInZoneDisksToHeal []map[int][]StorageAPI

healDisks := globalBackgroundHealState.getHealLocalDisks()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
len(healDisks)))

erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.zones))
for i := range z.zones {
erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
}
if len(localDisksToHeal) == 0 {
}

// heal only if new disks found.
for _, endpoint := range healDisks {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}

// Reformat disks only if needed.
if !healNewDisks {
continue
}

logger.Info("New unformatted drives detected attempting to heal...")
for i, disks := range localDisksInZoneHeal {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
zoneIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
if zoneIdx < 0 {
continue
}
}

// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}

var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
printEndpointError(endpoint, err, false)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[zoneIdx].format, format)
if err != nil {
printEndpointError(endpoint, err, false)
continue
}
erasureSetInZoneToHeal[i] = erasureSetToHeal

erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
}

// Heal all erasure sets that need
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
buckets, _ := z.ListBucketsHeal(ctx)
for i, setMap := range erasureSetInZoneDisksToHeal {
for setIndex, disks := range setMap {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))

lbDisks := z.zones[i].sets[setIndex].getLoadBalancedNDisks(z.zones[i].listTolerancePerSet)
if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s zone complete", disk, humanize.Ordinal(i+1))

if err := disk.DeleteFile(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
healingTrackerFilename); err != nil {
logger.LogIf(ctx, err)
continue
}

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}
}

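The rewritten monitor above is the recurring pattern in this change set: block on a timer, but let context cancellation win so shutdown is prompt. A self-contained sketch of that loop (the function and callback names are illustrative, not MinIO APIs):

// Run work() every interval until ctx is cancelled.
func pollEvery(ctx context.Context, interval time.Duration, work func()) {
	for {
		select {
		case <-ctx.Done():
			return // server shutting down
		case <-time.After(interval):
			work() // e.g. scan local endpoints and queue unformatted disks
		}
	}
}
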
cmd/background-newdisks-heal-ops_gen.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
return
}
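A note on the literal bytes in EncodeMsg/MarshalMsg above, since the generated comments are terse: the encoding is a one-entry MessagePack map whose key is the string "ID". A worked reading of the layout (annotation only, not additional source code):

// 0x81             fixmap header: map with exactly 1 key/value pair
// 0xa2 0x49 0x44   fixstr header (length 2) followed by ASCII "ID"
// ...              value: msgp.AppendString(o, z.ID)
// Msgsize bounds this accordingly:
//   1 (map header) + 3 ("ID" key) + msgp.StringPrefixSize + len(z.ID)
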
cmd/background-newdisks-heal-ops_gen_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"bytes"
"testing"

"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalhealingTracker(t *testing.T) {
v := healingTracker{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}

left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}

func BenchmarkMarshalMsghealingTracker(b *testing.B) {
v := healingTracker{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}

func BenchmarkAppendMsghealingTracker(b *testing.B) {
v := healingTracker{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}

func BenchmarkUnmarshalhealingTracker(b *testing.B) {
v := healingTracker{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

func TestEncodeDecodehealingTracker(t *testing.T) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)

m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
}

vn := healingTracker{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}

buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}

func BenchmarkEncodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}

func BenchmarkDecodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
@@ -18,6 +18,7 @@ package cmd

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -80,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(volume, filePath, totalFileSize, r)
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
@@ -118,7 +119,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
return 0, err
}
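The streamOffset computation above accounts for one checksum of h.Size() bytes interleaved before every shardSize bytes of data, so a logical offset is shifted by one checksum per full shard that precedes it. A worked example with assumed sizes (illustrative values only):

// Assume shardSize = 1 MiB (1048576 bytes) and a 32-byte checksum.
// A logical offset of 2.5 MiB (2621440 bytes) is preceded by 2 full
// shards, hence 2 checksums were written before the requested byte:
//   streamOffset = (offset/shardSize)*h.Size() + offset
//                = 2*32 + 2621440
//                = 2621504 bytes into the on-disk stream.
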
@@ -139,8 +140,8 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}

@@ -17,6 +17,8 @@
package cmd

import (
"context"
"fmt"
"hash"
"io"

@@ -33,14 +35,14 @@ type wholeBitrotWriter struct {
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
}
return len(p), nil
@@ -68,15 +70,13 @@ type wholeBitrotReader struct {
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, errLessData)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
return 0, errLessData
}
n = copy(buf, b.buf)

@@ -17,6 +17,7 @@
package cmd

import (
"context"
"io"
"io/ioutil"
"log"
@@ -34,12 +35,12 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
volume := "testvol"
filePath := "testfile"

disk, err := newXLStorage(tmpDir, "")
disk, err := newLocalXLStorage(tmpDir)
if err != nil {
t.Fatal(err)
}

disk.MakeVol(volume)
disk.MakeVol(context.Background(), volume)

writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)


@@ -131,7 +131,7 @@ func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method s
values = make(url.Values)
}

respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
@@ -178,17 +178,22 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
}
onlineServers++
}
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(500 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
select {
case <-ctx.Done():
return ctx.Err()
default:
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(100 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
}
offlineEndpoints = nil
}
return nil
}
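The verifyServerSystemConfig change above wraps the sleep-and-retry in a select on ctx.Done(), so bootstrap no longer spins after the server is asked to shut down. The shape of that loop, reduced to its essentials (checkPeers is a hypothetical stand-in for the real probe):

// Retry a probe until it succeeds or ctx is cancelled.
func waitUntilOnline(ctx context.Context, checkPeers func() bool) error {
	retries := 0
	for !checkPeers() {
		select {
		case <-ctx.Done():
			return ctx.Err() // shutdown interrupts the wait
		default:
			time.Sleep(100 * time.Millisecond) // avoid 100% CPU while peers are offline
			retries++
			if retries >= 5 {
				retries = 0 // log progress every 5 rounds, then reset
			}
		}
	}
	return nil
}
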
@@ -228,13 +233,13 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
}
}

trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultTimeout)
restClient := rest.NewClient(serverURL, trFn, newAuthToken)
restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
// Instantiate a new rest client for healthcheck
// to avoid recursive healthCheckFn()
respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).CallWithContext(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).Call(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
cancel()
var ne *rest.NetworkError

@@ -34,7 +34,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -33,7 +33,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
@@ -72,7 +72,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && err != dns.ErrNoEntriesFound && err != dns.ErrNotImplemented {
logger.LogIf(GlobalContext, err)
return
}
@@ -80,33 +80,35 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
bucketsSet := set.NewStringSet()
bucketsToBeUpdated := set.NewStringSet()
bucketsInConflict := set.NewStringSet()
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
continue
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}

// Add/update buckets that are not registered with the DNS
@@ -449,14 +451,15 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
Versioned:        globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})

deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[deleteList[i]]
apiErr := toAPIError(ctx, errs[i])
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" || apiErr.Code == "InvalidArgument" {
deletedObjects[dindex] = dObjects[i]
continue
}
@@ -561,7 +564,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
if globalDNSConfig != nil {
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if err == dns.ErrNoEntriesFound {
// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
// exists elsewhere
if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -654,16 +659,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {

if !objectAPI.IsEncryptionSupported() && crypto.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}

bucket := mux.Vars(r)["bucket"]

// To detect if the client has disconnected.
r.Body = &contextReader{r.Body, r.Context()}

// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
@@ -762,7 +765,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Make sure formValues adhere to policy restrictions.
if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrAccessDenied, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -804,7 +807,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
// This request header needs to be set prior to setting ObjectOptions
if !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
r.Header.Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
}

@@ -999,7 +1002,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -31,7 +31,7 @@ type LifecycleSys struct{}
// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -24,12 +24,37 @@ import (
"strings"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"

"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/sync/errgroup"
)

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
inParallel := func(objects []ObjectInfo) {
g := errgroup.WithNErrs(len(objects))
for index := range objects {
index := index
g.Go(func() error {
objects[index].ETag = objects[index].GetActualETag(nil)
objects[index].Size, _ = objects[index].GetActualSize()
return nil
}, index)
}
g.Wait()
}
const maxConcurrent = 500
for {
if len(objects) < maxConcurrent {
inParallel(objects)
return
}
inParallel(objects[:maxConcurrent])
objects = objects[maxConcurrent:]
}
}

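concurrentDecryptETag above replaces the per-object sequential loops that the hunks below delete: listings are now decrypted in parallel, at most 500 objects at a time. MinIO's version uses the errgroup package from its imports; the same chunked fan-out in generic form (illustrative names, sync.WaitGroup instead of errgroup):

// Apply handle() to items in parallel, bounded to batch goroutines at once.
func inBatches(items []string, batch int, handle func(string)) {
	for len(items) > 0 {
		n := batch
		if n > len(items) {
			n = len(items)
		}
		var wg sync.WaitGroup
		for _, it := range items[:n] {
			wg.Add(1)
			go func(it string) {
				defer wg.Done()
				handle(it)
			}(it)
		}
		wg.Wait() // finish this batch before starting the next
		items = items[n:]
	}
}
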
// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args do not meet the required conditions.
// Special conditions required by MinIO server are as below
@@ -69,7 +94,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -89,6 +114,15 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

// Forward the request using Source IP or bucket
forwardStr := handlers.GetSourceIPFromHeaders(r)
if forwardStr == "" {
forwardStr = bucket
}
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
return
}

listObjectVersions := objectAPI.ListObjectVersions

// Inititate a list object versions operation based on the input params.
@@ -100,16 +134,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

for i := range listObjectVersionsInfo.Objects {
if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) {
listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false)
}
listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)

response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

@@ -178,16 +203,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
return
}

for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
}
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)

// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
@@ -264,16 +280,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}

for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
}
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)

// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
@@ -339,8 +346,8 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
return proxyRequest(ctx, w, r, ep)
}

func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(bucket, len(globalProxyEndpoints)))
func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
@@ -381,7 +388,12 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

if proxyRequestByBucket(ctx, w, r, bucket) {
// Forward the request using Source IP or bucket
forwardStr := handlers.GetSourceIPFromHeaders(r)
if forwardStr == "" {
forwardStr = bucket
}
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
return
}

@@ -396,16 +408,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

for i := range listObjectsInfo.Objects {
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
}
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectsInfo.Objects)

response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)


@@ -24,6 +24,7 @@ import (
"sync"

"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
@@ -71,7 +72,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
// Update update bucket metadata for the specified config file.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
@@ -80,7 +81,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
// This code is needed only for gateway implementations.
switch configFile {
case bucketSSEConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -89,7 +90,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketLifecycleConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -98,7 +99,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketTaggingConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -107,7 +108,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketNotificationConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -148,15 +149,27 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
meta.EncryptionConfigXML = configData
case bucketTaggingConfig:
meta.TaggingConfigXML = configData
case objectLockConfig:
meta.ObjectLockConfigXML = configData
case bucketVersioningConfig:
meta.VersioningConfigXML = configData
case bucketQuotaConfigFile:
meta.QuotaConfigJSON = configData
case objectLockConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ObjectLockConfigXML = configData
case bucketVersioningConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.VersioningConfigXML = configData
case bucketReplicationConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ReplicationConfigXML = configData
case bucketTargetsFile:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.BucketTargetsConfigJSON = configData
default:
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
@@ -176,6 +189,13 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
// If no metadata exists errConfigNotFound is returned and a new metadata is returned.
// Only a shallow copy is returned, so referenced data should not be modified,
// but can be replaced atomically.
//
// This function should only be used with
// - GetBucketInfo
// - ListBuckets
// - ListBucketsHeal (only in case of erasure coding mode)
// For all other bucket specific metadata, use the relevant
// calls implemented specifically for each of those features.
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
if globalIsGateway || bucket == minioMetaBucket {
return newBucketMetadata(bucket), errConfigNotFound
@@ -253,9 +273,9 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
if globalIsGateway && globalGatewayName == "nas" {
if globalIsGateway && globalGatewayName == NASBackendGateway {
// Only needed in case of NAS gateway.
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -293,7 +313,7 @@ func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEC
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -353,10 +373,24 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
return meta.bucketTargetConfig, nil
}

// GetBucketTarget returns the target for the bucket and arn.
func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin.BucketTarget, error) {
targets, err := sys.GetBucketTargetsConfig(bucket)
if err != nil {
return madmin.BucketTarget{}, err
}
for _, t := range targets.Targets {
if t.Arn == arn {
return t, nil
}
}
return madmin.BucketTarget{}, errConfigNotFound
}

// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return newBucketMetadata(bucket), errServerNotInitialized
}
@@ -397,8 +431,9 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
return nil
}

// Load PolicySys once during boot.
return sys.load(ctx, buckets, objAPI)
// Load bucket metadata sys in background
go logger.LogIf(ctx, sys.load(ctx, buckets, objAPI))
return nil
}

// concurrently load bucket metadata to speed up loading bucket metadata.

@@ -107,6 +107,10 @@ func newBucketMetadata(name string) BucketMetadata {
// Load - loads the metadata of bucket by name from ObjectLayer api.
// If an error is returned the returned metadata will be default initialized.
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
if name == "" {
logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
return errors.New("bucket name cannot be empty")
}
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, api, configFile)
if err != nil {
@@ -128,20 +132,22 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
}
// OK, parse data.
_, err = b.UnmarshalMsg(data[4:])
b.Name = name // in-case parsing failed for some reason, make sure bucket name is not empty.
return err
}

// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
b := newBucketMetadata(bucket)
err := b.Load(ctx, objectAPI, bucket)
err := b.Load(ctx, objectAPI, b.Name)
if err == nil {
return b, b.convertLegacyConfigs(ctx, objectAPI)
}

if err != errConfigNotFound {
if !errors.Is(err, errConfigNotFound) {
return b, err
}

// Old bucket without bucket metadata. Hence we migrate existing settings.
return b, b.convertLegacyConfigs(ctx, objectAPI)
}
@@ -354,7 +360,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {

// deleteBucketMetadata deletes bucket metadata
// If config does not exist no error is returned.
func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
metadataFiles := []string{
dataUsageCacheName,
bucketMetadataFile,

@@ -33,7 +33,7 @@ type BucketObjectLockSys struct{}
// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return r, errServerNotInitialized
}

@@ -68,6 +68,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
principalType := "Anonymous"
if username != "" {
principalType = "User"
if len(claims) > 0 {
principalType = "AssumedRole"
}
if username == globalActiveCred.AccessKey {
principalType = "Account"
}
}

vid := r.URL.Query().Get("versionId")
@@ -143,7 +149,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
for k, v := range claims {
vStr, ok := v.(string)
if ok {
args[k] = []string{vStr}
// Special case for AD/LDAP STS users
if k == ldapUser {
args["user"] = []string{vStr}
} else {
args[k] = []string{vStr}
}
}
}


@@ -63,7 +63,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}

func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
@@ -71,6 +71,8 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 1 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})

@@ -18,7 +18,10 @@ package cmd

import (
"context"
"fmt"
"net/http"
"runtime"
"strings"
"time"

miniogo "github.com/minio/minio-go/v7"
@@ -27,6 +30,7 @@ import (
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/bandwidth"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -35,7 +39,7 @@ import (
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -49,12 +53,12 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati
// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.RoleArn)
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rCfg.RoleArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
return false, BucketRemoteDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
@@ -75,8 +79,23 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}

func mustReplicateWeb(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string, permErr APIErrorCode) bool {
if permErr != ErrNone {
return false
}
return mustReplicater(ctx, r, bucket, object, meta, replStatus)
}

// mustReplicate returns true if object meets replication criteria.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, "", r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
return mustReplicater(ctx, r, bucket, object, meta, replStatus)
}

// mustReplicater returns true if object meets replication criteria.
func mustReplicater(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if globalIsGateway {
return false
}
@@ -86,9 +105,6 @@ func mustReplicate(ctx context.Context, r *http.Request, bucket, object string,
if replication.StatusType(replStatus) == replication.Replica {
return false
}
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return false
@@ -104,15 +120,17 @@ func mustReplicate(ctx context.Context, r *http.Request, bucket, object string,
return cfg.Replicate(opts)
}

func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
continue
}
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
meta[k] = v
}

tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
@@ -122,14 +140,17 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp
sc = objInfo.StorageClass
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
ReplicationVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationMTime: objInfo.ModTime,
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
},
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
rmode := miniogo.RetentionMode(mode)
@@ -148,74 +169,198 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}

return
}

// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
bucket := objInfo.Bucket
object := objInfo.Name

cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.RoleArn)
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
})
if err != nil {
return
}
defer gr.Close()
objInfo := gr.ObjInfo
objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
gr.Close()
return
}

dest := cfg.GetDestination()
if dest.Bucket == "" {
gr.Close()
return
}

// if heal encounters a pending replication status, either replication
// has failed due to server shutdown or crawler and PutObject replication are in contention.
healPending := objInfo.ReplicationStatus == replication.Pending

// In the rare event that replication is in pending state either due to
// server shut down/crash before replication completed or healing and PutObject
// race - do an additional stat to see if the version ID exists
if healPending {
_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {
gr.Close()
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return
}
}
putOpts := putReplicationOpts(dest, objInfo)

target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
return
}
putOpts := putReplicationOpts(ctx, dest, objInfo)
replicationStatus := replication.Complete
_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)

// Setup bandwidth throttling
totalNodesCount := len(GetRemotePeers(globalEndpoints)) + 1
b := target.BandwidthLimit / int64(totalNodesCount)
var headerSize int
for k, v := range putOpts.Header() {
headerSize += len(k) + len(v)
}
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b)

_, err = tgt.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts)
r.Close()
if err != nil {
replicationStatus = replication.Failed
// Notify replication failure event.
if eventArg == nil {
eventArg = &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
}
}
eventArg.EventName = event.OperationReplicationFailed
eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
sendEvent(*eventArg)
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}

// FIXME: add support for missing replication events
// - event.ObjectReplicationNotTracked
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
if replicationStatus == replication.Failed {
sendEvent(eventArgs{
EventName: event.ObjectReplicationFailed,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
}

objInfo.metadataOnly = true // Perform only metadata updates.
if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
logger.LogIf(ctx, err)
}, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
}
}

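The throttling setup in replicateObject above splits the per-target bandwidth limit evenly across all nodes before wrapping the upload stream in a MonitoredReader. A worked example with assumed numbers (illustrative only; whether header bytes count against the budget is inferred from the headerSize argument):

// Suppose target.BandwidthLimit = 100 MiB/s (104857600 B/s) and the
// deployment has 3 remote peers, so totalNodesCount = 3 + 1 = 4:
//   b = 104857600 / 4 = 26214400  // each node may use ~25 MiB/s
// NewMonitoredReader then paces the PutObject body to that budget,
// with headerSize accounted for up front.
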
// filterReplicationStatusMetadata filters replication status metadata for COPY
|
||||
func filterReplicationStatusMetadata(metadata map[string]string) map[string]string {
|
||||
// Copy on write
|
||||
dst := metadata
|
||||
var copied bool
|
||||
delKey := func(key string) {
|
||||
if _, ok := metadata[key]; !ok {
|
||||
return
|
||||
}
|
||||
if !copied {
|
||||
dst = make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
dst[k] = v
|
||||
}
|
||||
copied = true
|
||||
}
|
||||
delete(dst, key)
|
||||
}
|
||||
|
||||
delKey(xhttp.AmzBucketReplicationStatus)
|
||||
return dst
|
||||
}
|
||||
type replicationState struct {
	// add future metrics here
	replicaCh chan ObjectInfo
}

func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
	select {
	case r.replicaCh <- oi:
	default:
	}
}
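queueReplicaTask uses a select with an empty default case, which makes the channel send non-blocking: if the 10000-slot buffer is full, the task is silently dropped rather than stalling the request path. A self-contained illustration of that semantics (names invented):

package main

import "fmt"

func main() {
	ch := make(chan int, 1)

	trySend := func(v int) bool {
		select {
		case ch <- v: // succeeds while the buffer has room
			return true
		default: // buffer full: give up immediately instead of blocking
			return false
		}
	}

	fmt.Println(trySend(1)) // true
	fmt.Println(trySend(2)) // false — capacity 1 is exhausted, value dropped
}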
var (
	globalReplicationState *replicationState
	// TODO: currently conservative; can be tuned
	// in the future. Take only half the CPUs for
	// replication.
	globalReplicationConcurrent = runtime.GOMAXPROCS(0) / 2
)
func newReplicationState() *replicationState {
	// fix minimum concurrent replication to 1 for single CPU setup
	if globalReplicationConcurrent == 0 {
		globalReplicationConcurrent = 1
	}
	rs := &replicationState{
		replicaCh: make(chan ObjectInfo, 10000),
	}
	go func() {
		<-GlobalContext.Done()
		close(rs.replicaCh)
	}()
	return rs
}
// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
	// Add a new worker.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case oi, ok := <-r.replicaCh:
				if !ok {
					return
				}
				replicateObject(ctx, oi, objectAPI)
			}
		}
	}()
}
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
	if globalReplicationState == nil {
		return
	}

	// Start with globalReplicationConcurrent.
	for i := 0; i < globalReplicationConcurrent; i++ {
		globalReplicationState.addWorker(ctx, objectAPI)
	}
}
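Together, newReplicationState, addWorker and initBackgroundReplication form a bounded worker pool: a buffered channel fans tasks out to GOMAXPROCS/2 goroutines, which exit when the context is cancelled or the channel closes. A compact, hedged sketch of the same shape with generic names (not the actual MinIO types):

package main

import (
	"context"
	"fmt"
	"sync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tasks := make(chan int, 100)
	var wg sync.WaitGroup

	// Start a fixed number of workers, mirroring initBackgroundReplication.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case t, ok := <-tasks:
					if !ok {
						return
					}
					fmt.Println("processed", t)
				}
			}
		}()
	}

	for i := 0; i < 10; i++ {
		tasks <- i
	}
	close(tasks) // workers drain the buffer, then exit
	wg.Wait()
}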
@@ -20,7 +20,9 @@ import (
	"context"
	"net/http"
	"sync"
+	"time"

+	minio "github.com/minio/minio-go/v7"
	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio/pkg/bucket/versioning"
@@ -90,14 +92,16 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
		return BucketReplicationSourceNotVersioned{Bucket: bucket}
	}
	vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
-	if err != nil || vcfg.Status != string(versioning.Enabled) {
-		if isErrBucketNotFound(err) {
+	if err != nil {
+		if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
			return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
		}
-		return BucketReplicationTargetNotVersioned{Bucket: tgt.TargetBucket}
+		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+	}
+	if vcfg.Status != string(versioning.Enabled) {
+		return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
	}

	sys.Lock()
	defer sys.Unlock()
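The switch to minio.ToErrorResponse lets the caller distinguish "target bucket missing" from a generic connection failure instead of relying on a server-internal error type. A hedged sketch of that error-classification idiom with minio-go v7 (endpoint and credentials are placeholders):

package main

import (
	"context"
	"fmt"

	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	clnt, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	_, err = clnt.GetBucketVersioning(context.Background(), "no-such-bucket")
	if err != nil {
		// ToErrorResponse decodes the S3 error; Code carries values
		// such as "NoSuchBucket" or "AccessDenied".
		switch minio.ToErrorResponse(err).Code {
		case "NoSuchBucket":
			fmt.Println("remote target bucket does not exist")
		default:
			fmt.Println("remote target not reachable:", err)
		}
	}
}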
@@ -109,6 +113,9 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
		if t.Arn == tgt.Arn {
			return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
		}
+		if t.Label == tgt.Label {
+			return BucketRemoteLabelInUse{Bucket: t.TargetBucket}
+		}
		newtgts[idx] = *tgt
		found = true
		continue
@@ -169,8 +176,8 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
	return nil
}

-// GetReplicationTargetClient returns minio-go client for replication target instance
-func (sys *BucketTargetSys) GetReplicationTargetClient(ctx context.Context, arn string) *miniogo.Core {
+// GetRemoteTargetClient returns minio-go client for replication target instance
+func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *miniogo.Core {
	sys.RLock()
	defer sys.RUnlock()
	return sys.arnRemotesMap[arn]
@@ -196,19 +203,19 @@ func (sys *BucketTargetSys) Init(ctx context.Context, buckets []BucketInfo, objA
	return nil
}

-	// Load bucket targets once during boot.
-	sys.load(ctx, buckets, objAPI)
+	// Load bucket targets once during boot in background.
+	go sys.load(ctx, buckets, objAPI)
	return nil
}
-// UpdateTarget updates target to reflect metadata updates
-func (sys *BucketTargetSys) UpdateTarget(bucket string, cfg *madmin.BucketTargets) {
+// UpdateAllTargets updates target to reflect metadata updates
+func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketTargets) {
	if sys == nil {
		return
	}
	sys.Lock()
	defer sys.Unlock()
-	if cfg == nil || cfg.Empty() {
+	if tgts == nil || tgts.Empty() {
		// remove target and arn association
		if tgts, ok := sys.targetsMap[bucket]; ok {
			for _, t := range tgts {
@@ -219,10 +226,10 @@ func (sys *BucketTargetSys) UpdateTarget(bucket string, cfg *madmin.BucketTarget
		return
	}

-	if len(cfg.Targets) > 0 {
-		sys.targetsMap[bucket] = cfg.Targets
+	if len(tgts.Targets) > 0 {
+		sys.targetsMap[bucket] = tgts.Targets
	}
-	for _, tgt := range cfg.Targets {
+	for _, tgt := range tgts.Targets {
		tgtClient, err := sys.getRemoteTargetClient(&tgt)
		if err != nil {
			continue
@@ -232,7 +239,7 @@ func (sys *BucketTargetSys) UpdateTarget(bucket string, cfg *madmin.BucketTarget
		sys.clientsCache[tgtClient.EndpointURL().String()] = tgtClient
	}
-	sys.targetsMap[bucket] = cfg.Targets
+	sys.targetsMap[bucket] = tgts.Targets
}
// create minio-go clients for buckets having remote targets
@@ -275,8 +282,9 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*m
	creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")

	getRemoteTargetInstanceTransportOnce.Do(func() {
-		getRemoteTargetInstanceTransport = NewGatewayHTTPTransport()
+		getRemoteTargetInstanceTransport = newGatewayHTTPTransport(1 * time.Hour)
	})

	core, err := miniogo.NewCore(tcfg.Endpoint, &miniogo.Options{
		Creds:  creds,
		Secure: tcfg.Secure,
@@ -70,6 +70,14 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
		}, r.URL, guessIsBrowserReq(r))
		return
	}
+	if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
+		writeErrorResponse(ctx, w, APIError{
+			Code:           "InvalidBucketState",
+			Description:    "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
+			HTTPStatusCode: http.StatusConflict,
+		}, r.URL, guessIsBrowserReq(r))
+		return
+	}

	configData, err := xml.Marshal(v)
	if err != nil {
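With this guard in place, suspending versioning on a bucket that still carries a replication configuration is rejected with HTTP 409. A hedged client-side sketch of what a caller would observe (minio-go v7; endpoint, credentials and bucket name are placeholders):

package main

import (
	"context"
	"fmt"

	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	clnt, err := minio.New("minio.example.com:9000", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS", "SECRET", ""),
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Fails while a replication configuration exists on the bucket.
	if err := clnt.SuspendVersioning(context.Background(), "replicated-bucket"); err != nil {
		fmt.Println(minio.ToErrorResponse(err).Code) // InvalidBucketState
	}
}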
@@ -20,6 +20,7 @@ import (
	"crypto/x509"
	"encoding/gob"
	"errors"
+	"fmt"
	"net"
	"net/url"
	"os"
@@ -42,15 +43,11 @@ func init() {
	logger.Init(GOPATH, GOROOT)
	logger.RegisterError(config.FmtError)

-	// Initialize globalConsoleSys system
-	globalConsoleSys = NewConsoleLogger(GlobalContext)
-	logger.AddTarget(globalConsoleSys)
-
	gob.Register(StorageErr(""))
}
func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
-	if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
+	if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
		logger.Fatal(errInvalidArgument,
			"Encryption support is requested but '%s' does not support encryption", name)
	}
@@ -293,7 +290,7 @@ func logStartupMessage(msg string) {
	logger.StartupMessage(msg)
}

-func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
+func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
	if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
		return nil, nil, false, nil
	}
@@ -302,11 +299,71 @@ func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn b
		return nil, nil, false, err
	}

-	c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
+	manager, err = certs.NewManager(GlobalContext, getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
	if err != nil {
		return nil, nil, false, err
	}
	// MinIO has support for multiple certificates. It expects the following structure:
	//  certs/
	//   │
	//   ├─ public.crt
	//   ├─ private.key
	//   │
	//   ├─ example.com/
	//   │   │
	//   │   ├─ public.crt
	//   │   └─ private.key
	//   └─ foobar.org/
	//      │
	//      ├─ public.crt
	//      └─ private.key
	//   ...
	//
	// Therefore, we read all filenames in the cert directory and check
	// for each directory whether it contains a public.crt and private.key.
	// If so, we try to add it to certificate manager.
	root, err := os.Open(globalCertsDir.Get())
	if err != nil {
		return nil, nil, false, err
	}
	defer root.Close()

	files, err := root.Readdir(-1)
	if err != nil {
		return nil, nil, false, err
	}
	for _, file := range files {
		// Ignore all
		// - regular files
		// - "CAs" directory
		// - any directory which starts with ".."
		if file.Mode().IsRegular() || file.Name() == "CAs" || strings.HasPrefix(file.Name(), "..") {
			continue
		}
		if file.Mode()&os.ModeSymlink == os.ModeSymlink {
			file, err = os.Stat(filepath.Join(root.Name(), file.Name()))
			if err != nil {
				// not accessible, ignore
				continue
			}
			if !file.IsDir() {
				continue
			}
		}

		var (
			certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
			keyFile  = filepath.Join(root.Name(), file.Name(), privateKeyFile)
		)
		if !isFile(certFile) || !isFile(keyFile) {
			continue
		}
		if err = manager.AddCertificate(certFile, keyFile); err != nil {
			err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
			logger.LogIf(GlobalContext, err, logger.Minio)
		}
	}
	secureConn = true
-	return x509Certs, c, secureConn, nil
+	return x509Certs, manager, secureConn, nil
}
@@ -46,7 +46,11 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
	return buffer.Bytes(), nil
}

-func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
+type objectDeleter interface {
+	DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
+}
+
+func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) error {
	_, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
	if err != nil && isErrObjectNotFound(err) {
		return errConfigNotFound
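Narrowing deleteConfig's parameter from the full ObjectLayer to a one-method objectDeleter interface makes the function trivial to unit-test: any value with a matching DeleteObject method satisfies it. A hedged sketch of such a test double (fakeDeleter is invented for illustration; it would live alongside deleteConfig in the cmd package, so ObjectOptions and ObjectInfo resolve):

// A minimal stub satisfying objectDeleter for tests.
type fakeDeleter struct {
	deleted []string
}

func (f *fakeDeleter) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	f.deleted = append(f.deleted, bucket+"/"+object)
	return ObjectInfo{}, nil
}

// In a test:
//   fd := &fakeDeleter{}
//   err := deleteConfig(ctx, fd, "config/current.json")
//   // assert err == nil and fd.deleted records the expected path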
@@ -25,8 +25,9 @@ import (
	"github.com/minio/minio/cmd/config/api"
	"github.com/minio/minio/cmd/config/cache"
	"github.com/minio/minio/cmd/config/compress"
+	"github.com/minio/minio/cmd/config/crawler"
+	"github.com/minio/minio/cmd/config/dns"
	"github.com/minio/minio/cmd/config/etcd"
-	"github.com/minio/minio/cmd/config/etcd/dns"
	xldap "github.com/minio/minio/cmd/config/identity/ldap"
	"github.com/minio/minio/cmd/config/identity/openid"
	"github.com/minio/minio/cmd/config/notify"
@@ -55,6 +56,7 @@ func initHelp() {
		config.KmsKesSubSys:        crypto.DefaultKesKVS,
		config.LoggerWebhookSubSys: logger.DefaultKVS,
		config.AuditWebhookSubSys:  logger.DefaultAuditKVS,
+		config.CrawlerSubSys:       crawler.DefaultKVS,
	}
	for k, v := range notify.DefaultNotificationKVS {
		kvs[k] = v
@@ -106,6 +108,10 @@ func initHelp() {
			Key:         config.APISubSys,
			Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
		},
+		config.HelpKV{
+			Key:         config.CrawlerSubSys,
+			Description: "manage continuous disk crawling for bucket disk usage, lifecycle, quota and data integrity checks",
+		},
		config.HelpKV{
			Key:         config.LoggerWebhookSubSys,
			Description: "send server logs to webhook endpoints",
@@ -185,6 +191,7 @@ func initHelp() {
		config.EtcdSubSys:           etcd.Help,
		config.CacheSubSys:          cache.Help,
		config.CompressionSubSys:    compress.Help,
+		config.CrawlerSubSys:        crawler.Help,
		config.IdentityOpenIDSubSys: openid.Help,
		config.IdentityLDAPSubSys:   xldap.Help,
		config.PolicyOPASubSys:      opa.Help,
@@ -246,6 +253,10 @@ func validateConfig(s config.Config, setDriveCount int) error {
		return err
	}

+	if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
+		return err
+	}
+
	{
		etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
		if err != nil {
@@ -306,8 +317,7 @@ func validateConfig(s config.Config, setDriveCount int) error {
		return err
	}

-	return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
-		globalNotificationSys.ConfiguredTargetIDs())
+	return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
}
func lookupConfigs(s config.Config, setDriveCount int) {
@@ -322,6 +332,19 @@ func lookupConfigs(s config.Config, setDriveCount int) {
		}
	}

+	if dnsURL, dnsUser, dnsPass, ok := env.LookupEnv(config.EnvDNSWebhook); ok {
+		globalDNSConfig, err = dns.NewOperatorDNS(dnsURL,
+			dns.Authentication(dnsUser, dnsPass),
+			dns.RootCAs(globalRootCAs))
+		if err != nil {
+			if globalIsGateway {
+				logger.FatalIf(err, "Unable to initialize remote webhook DNS config")
+			} else {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
+			}
+		}
+	}
+
	etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
	if err != nil {
		if globalIsGateway {
@@ -343,19 +366,25 @@ func lookupConfigs(s config.Config, setDriveCount int) {
		}
	}

-	if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil && globalDNSConfig == nil {
-		globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
-			dns.DomainNames(globalDomainNames),
-			dns.DomainIPs(globalDomainIPs),
-			dns.DomainPort(globalMinioPort),
-			dns.CoreDNSPath(etcdCfg.CoreDNSPath),
-		)
-		if err != nil {
-			if globalIsGateway {
-				logger.FatalIf(err, "Unable to initialize DNS config")
-			} else {
-				logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
-					globalDomainNames, err))
+	if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
+		if globalDNSConfig != nil {
+			// if global DNS is already configured, indicate with a warning, in case
+			// users are confused.
+			logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, not using etcd for DNS store", globalDNSConfig))
+		} else {
+			globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
+				dns.DomainNames(globalDomainNames),
+				dns.DomainIPs(globalDomainIPs),
+				dns.DomainPort(globalMinioPort),
+				dns.CoreDNSPath(etcdCfg.CoreDNSPath),
+			)
+			if err != nil {
+				if globalIsGateway {
+					logger.FatalIf(err, "Unable to initialize DNS config")
+				} else {
+					logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
+						globalDomainNames, err))
+				}
+			}
		}
	}
@@ -378,7 +407,12 @@ func lookupConfigs(s config.Config, setDriveCount int) {
		logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
	}

-	globalAPIConfig.init(apiConfig)
+	globalAPIConfig.init(apiConfig, setDriveCount)
+
+	// Initialize remote instance transport once.
+	getRemoteInstanceTransportOnce.Do(func() {
+		getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
+	})

	if globalIsErasure {
		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
@@ -404,6 +438,10 @@ func lookupConfigs(s config.Config, setDriveCount int) {
			}
		}
	}
+	globalCrawlerConfig, err = crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
+	if err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Unable to read crawler config: %w", err))
+	}

	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
	if err != nil {
@@ -417,6 +455,9 @@ func lookupConfigs(s config.Config, setDriveCount int) {

	// Enable auto-encryption if enabled
	globalAutoEncryption = kmsCfg.AutoEncryption
+	if globalAutoEncryption && !globalIsGateway {
+		logger.LogIf(ctx, fmt.Errorf("%s env is deprecated please migrate to using `mc encrypt` at bucket level", crypto.EnvKMSAutoEncryption))
+	}

	globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
	if err != nil {
@@ -452,40 +493,48 @@ func lookupConfigs(s config.Config, setDriveCount int) {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
	}

-	for _, l := range loggerCfg.HTTP {
+	for k, l := range loggerCfg.HTTP {
		if l.Enabled {
			// Enable http logging
-			logger.AddTarget(
-				http.New(http.WithEndpoint(l.Endpoint),
+			if err = logger.AddTarget(
+				http.New(
+					http.WithTargetName(k),
+					http.WithEndpoint(l.Endpoint),
					http.WithAuthToken(l.AuthToken),
					http.WithUserAgent(loggerUserAgent),
					http.WithLogKind(string(logger.All)),
					http.WithTransport(NewGatewayHTTPTransport()),
				),
-			)
+			); err != nil {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize console HTTP target: %w", err))
+			}
		}
	}

-	for _, l := range loggerCfg.Audit {
+	for k, l := range loggerCfg.Audit {
		if l.Enabled {
			// Enable http audit logging
-			logger.AddAuditTarget(
-				http.New(http.WithEndpoint(l.Endpoint),
+			if err = logger.AddAuditTarget(
+				http.New(
+					http.WithTargetName(k),
+					http.WithEndpoint(l.Endpoint),
					http.WithAuthToken(l.AuthToken),
					http.WithUserAgent(loggerUserAgent),
					http.WithLogKind(string(logger.All)),
					http.WithTransport(NewGatewayHTTPTransport()),
				),
-			)
+			); err != nil {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
+			}
		}
	}

-	globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(), false)
+	globalConfigTargetList, err = notify.GetNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), false)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
	}

-	globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport(), true)
+	globalEnvTargetList, err = notify.GetNotificationTargets(GlobalContext, newServerConfig(), NewGatewayHTTPTransport(), true)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
	}
@@ -29,15 +29,23 @@ import (

// API sub-system constants
const (
-	apiRequestsMax      = "requests_max"
-	apiRequestsDeadline = "requests_deadline"
-	apiReadyDeadline    = "ready_deadline"
-	apiCorsAllowOrigin  = "cors_allow_origin"
+	apiRequestsMax             = "requests_max"
+	apiRequestsDeadline        = "requests_deadline"
+	apiClusterDeadline         = "cluster_deadline"
+	apiCorsAllowOrigin         = "cors_allow_origin"
+	apiRemoteTransportDeadline = "remote_transport_deadline"

-	EnvAPIRequestsMax      = "MINIO_API_REQUESTS_MAX"
-	EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
-	EnvAPIReadyDeadline    = "MINIO_API_READY_DEADLINE"
-	EnvAPICorsAllowOrigin  = "MINIO_API_CORS_ALLOW_ORIGIN"
+	EnvAPIRequestsMax             = "MINIO_API_REQUESTS_MAX"
+	EnvAPIRequestsDeadline        = "MINIO_API_REQUESTS_DEADLINE"
+	EnvAPIClusterDeadline         = "MINIO_API_CLUSTER_DEADLINE"
+	EnvAPICorsAllowOrigin         = "MINIO_API_CORS_ALLOW_ORIGIN"
+	EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
)

+// Deprecated key and ENVs
+const (
+	apiReadyDeadline    = "ready_deadline"
+	EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
+)
+
// DefaultKVS - default API config
@@ -52,22 +60,27 @@ var (
			Value: "10s",
		},
		config.KV{
-			Key:   apiReadyDeadline,
+			Key:   apiClusterDeadline,
			Value: "10s",
		},
		config.KV{
			Key:   apiCorsAllowOrigin,
			Value: "*",
		},
+		config.KV{
+			Key:   apiRemoteTransportDeadline,
+			Value: "2h",
+		},
	}
)

// Config API sub-system configuration
type Config struct {
-	APIRequestsMax      int           `json:"requests_max"`
-	APIRequestsDeadline time.Duration `json:"requests_deadline"`
-	APIReadyDeadline    time.Duration `json:"ready_deadline"`
-	APICorsAllowOrigin  []string      `json:"cors_allow_origin"`
+	RequestsMax             int           `json:"requests_max"`
+	RequestsDeadline        time.Duration `json:"requests_deadline"`
+	ClusterDeadline         time.Duration `json:"cluster_deadline"`
+	CorsAllowOrigin         []string      `json:"cors_allow_origin"`
+	RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
}
// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
@@ -83,6 +96,9 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {

// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
+	// Remove the deprecated key, it is no longer supported.
+	kvs.Delete(apiReadyDeadline)
+
	if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
		return cfg, err
	}
@@ -102,16 +118,23 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
		return cfg, err
	}

-	readyDeadline, err := time.ParseDuration(env.Get(EnvAPIReadyDeadline, kvs.Get(apiReadyDeadline)))
+	clusterDeadline, err := time.ParseDuration(env.Get(EnvAPIClusterDeadline, kvs.Get(apiClusterDeadline)))
	if err != nil {
		return cfg, err
	}

	corsAllowOrigin := strings.Split(env.Get(EnvAPICorsAllowOrigin, kvs.Get(apiCorsAllowOrigin)), ",")

+	remoteTransportDeadline, err := time.ParseDuration(env.Get(EnvAPIRemoteTransportDeadline, kvs.Get(apiRemoteTransportDeadline)))
+	if err != nil {
+		return cfg, err
+	}
+
	return Config{
-		APIRequestsMax:      requestsMax,
-		APIRequestsDeadline: requestsDeadline,
-		APIReadyDeadline:    readyDeadline,
-		APICorsAllowOrigin:  corsAllowOrigin,
+		RequestsMax:             requestsMax,
+		RequestsDeadline:        requestsDeadline,
+		ClusterDeadline:         clusterDeadline,
+		CorsAllowOrigin:         corsAllowOrigin,
+		RemoteTransportDeadline: remoteTransportDeadline,
	}, nil
}
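Each field follows the same lookup rule: the environment variable wins, otherwise the stored KV value is used, and durations are validated through time.ParseDuration. A standalone sketch of that precedence (the helper name is illustrative):

package main

import (
	"fmt"
	"os"
	"time"
)

// getEnv returns the environment value if set, else the fallback —
// the same precedence env.Get applies above.
func getEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}

func main() {
	os.Setenv("MINIO_API_REMOTE_TRANSPORT_DEADLINE", "45m")

	d, err := time.ParseDuration(getEnv("MINIO_API_REMOTE_TRANSPORT_DEADLINE", "2h"))
	if err != nil {
		fmt.Println("invalid duration:", err)
		return
	}
	fmt.Println(d) // 45m0s — the environment overrides the "2h" config default
}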
@@ -39,5 +39,11 @@ var (
			Optional:    true,
			Type:        "csv",
		},
+		config.HelpKV{
+			Key:         apiRemoteTransportDeadline,
+			Description: `set the deadline for API requests on remote transports while proxying between federated instances e.g. "2h"`,
+			Optional:    true,
+			Type:        "duration",
+		},
	}
)
@@ -24,8 +24,6 @@ import (
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
-	"os"
-	"path"

	"github.com/minio/minio/pkg/env"
)
@@ -69,38 +67,6 @@ func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
	return x509Certs, nil
}
-// GetRootCAs - returns all the root CAs into certPool
-// at the input certsCADir
-func GetRootCAs(certsCAsDir string) (*x509.CertPool, error) {
-	rootCAs, _ := x509.SystemCertPool()
-	if rootCAs == nil {
-		// In some systems (like Windows) system cert pool is
-		// not supported or no certificates are present on the
-		// system - so we create a new cert pool.
-		rootCAs = x509.NewCertPool()
-	}
-
-	fis, err := ioutil.ReadDir(certsCAsDir)
-	if err != nil {
-		if os.IsNotExist(err) || os.IsPermission(err) {
-			// Return success if CA's directory is missing or permission denied.
-			err = nil
-		}
-		return rootCAs, err
-	}
-
-	// Load all custom CA files.
-	for _, fi := range fis {
-		caCert, err := ioutil.ReadFile(path.Join(certsCAsDir, fi.Name()))
-		if err != nil {
-			// ignore files which are not readable.
-			continue
-		}
-		rootCAs.AppendCertsFromPEM(caCert)
-	}
-	return rootCAs, nil
-}

// LoadX509KeyPair - load an X509 key pair (private key , certificate)
// from the provided paths. The private key may be encrypted and is
// decrypted using the ENV_VAR: MINIO_CERT_PASSWD.
@@ -20,7 +20,6 @@ import (
	"fmt"
	"io/ioutil"
	"os"
-	"path/filepath"
	"runtime"
	"testing"
)
@@ -194,60 +193,6 @@ M9ofSEt/bdRD
	}
}

-func TestGetRootCAs(t *testing.T) {
-	emptydir, err := ioutil.TempDir("", "test-get-root-cas")
-	if err != nil {
-		t.Fatalf("Unable create temp directory. %v", emptydir)
-	}
-	defer os.RemoveAll(emptydir)
-
-	dir1, err := ioutil.TempDir("", "test-get-root-cas")
-	if err != nil {
-		t.Fatalf("Unable create temp directory. %v", dir1)
-	}
-	defer os.RemoveAll(dir1)
-	if err = os.Mkdir(filepath.Join(dir1, "empty-dir"), 0755); err != nil {
-		t.Fatalf("Unable create empty dir. %v", err)
-	}
-
-	dir2, err := ioutil.TempDir("", "test-get-root-cas")
-	if err != nil {
-		t.Fatalf("Unable create temp directory. %v", dir2)
-	}
-	defer os.RemoveAll(dir2)
-	if err = ioutil.WriteFile(filepath.Join(dir2, "empty-file"), []byte{}, 0644); err != nil {
-		t.Fatalf("Unable create test file. %v", err)
-	}
-
-	testCases := []struct {
-		certCAsDir  string
-		expectedErr error
-	}{
-		// ignores non-existent directories.
-		{"nonexistent-dir", nil},
-		// Ignores directories.
-		{dir1, nil},
-		// Ignore empty directory.
-		{emptydir, nil},
-		// Loads the cert properly.
-		{dir2, nil},
-	}
-
-	for _, testCase := range testCases {
-		_, err := GetRootCAs(testCase.certCAsDir)
-
-		if testCase.expectedErr == nil {
-			if err != nil {
-				t.Fatalf("error: expected = <nil>, got = %v", err)
-			}
-		} else if err == nil {
-			t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
-		} else if testCase.expectedErr.Error() != err.Error() {
-			t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
-		}
-	}
-}

func TestLoadX509KeyPair(t *testing.T) {
	for i, testCase := range loadX509KeyPairTests {
		privateKey, err := createTempFile("private.key", testCase.privateKey)
@@ -76,6 +76,7 @@ const (
	KmsKesSubSys        = "kms_kes"
	LoggerWebhookSubSys = "logger_webhook"
	AuditWebhookSubSys  = "audit_webhook"
+	CrawlerSubSys       = "crawler"

	// Add new constants here if you add new fields to config.
)
@@ -112,6 +113,7 @@ var SubSystems = set.CreateStringSet([]string{
	PolicyOPASubSys,
	IdentityLDAPSubSys,
	IdentityOpenIDSubSys,
+	CrawlerSubSys,
	NotifyAMQPSubSys,
	NotifyESSubSys,
	NotifyKafkaSubSys,
@@ -138,6 +140,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
	PolicyOPASubSys,
	IdentityLDAPSubSys,
	IdentityOpenIDSubSys,
+	CrawlerSubSys,
}...)
// Constant separators
@@ -264,6 +267,16 @@ func (kvs KVS) Get(key string) string {
	return ""
}

+// Delete - deletes the key if present from the KV list.
+func (kvs *KVS) Delete(key string) {
+	for i, kv := range *kvs {
+		if kv.Key == key {
+			*kvs = append((*kvs)[:i], (*kvs)[i+1:]...)
+			return
+		}
+	}
+}
+
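KVS.Delete removes an element in place with the standard append-based slice deletion, which shifts the tail left without allocating a new backing array. A minimal standalone demonstration of the idiom:

package main

import "fmt"

func main() {
	kvs := []string{"requests_max", "ready_deadline", "cors_allow_origin"}

	// Delete index i by appending the tail over it, as KVS.Delete does.
	i := 1
	kvs = append(kvs[:i], kvs[i+1:]...)

	fmt.Println(kvs) // [requests_max cors_allow_origin]
}

Note that because the backing array is reused, the operation is O(n) in the tail length and mutates the original slice, which is why Delete takes a pointer receiver.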
// Lookup - lookup a key in a list of KVS
func (kvs KVS) Lookup(key string) (string, bool) {
	for _, kv := range kvs {
@@ -448,6 +461,11 @@ func (c Config) Merge() Config {
				ckvs.Set(kv.Key, kv.Value)
			}
		}
+		if _, ok := cp[subSys]; !ok {
+			// A config subsystem was removed or server was downgraded.
+			Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
+			continue
+		}
		cp[subSys][tgt] = ckvs
	}
}
@@ -33,6 +33,7 @@ const (
	EnvPublicIPs  = "MINIO_PUBLIC_IPS"
	EnvFSOSync    = "MINIO_FS_OSYNC"
	EnvArgs       = "MINIO_ARGS"
+	EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"

	EnvUpdate = "MINIO_UPDATE"
cmd/config/crawler/crawler.go (new file, 67 lines)
@@ -0,0 +1,67 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package crawler

import (
	"errors"

	"github.com/minio/minio/cmd/config"
)

// Crawler config keys
const (
	BitrotScan = "bitrotscan"
)

// Config represents the crawler settings.
type Config struct {
	// Bitrot will perform bitrot scan on local disk when checking objects.
	Bitrot bool `json:"bitrotscan"`
}

var (
	// DefaultKVS - default KV config for crawler settings
	DefaultKVS = config.KVS{
		config.KV{
			Key:   BitrotScan,
			Value: config.EnableOff,
		},
	}

	// Help provides help for config values
	Help = config.HelpKVS{
		config.HelpKV{
			Key:         BitrotScan,
			Description: `perform bitrot scan on disks when checking objects during crawl`,
			Optional:    true,
			Type:        "on|off",
		},
	}
)

// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
	if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
		return cfg, err
	}
	bitrot := kvs.Get(BitrotScan)
	if bitrot != config.EnableOn && bitrot != config.EnableOff {
		return cfg, errors.New(BitrotScan + ": must be 'on' or 'off'")
	}
	cfg.Bitrot = bitrot == config.EnableOn
	return cfg, nil
}
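Usage mirrors the other config sub-systems: build a KVS (or take the stored one), then let LookupConfig validate it and map it onto the typed Config. A hedged sketch, assuming the cmd/config import paths shown above:

package main

import (
	"fmt"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/crawler"
)

func main() {
	kvs := config.KVS{
		config.KV{Key: crawler.BitrotScan, Value: config.EnableOn},
	}

	cfg, err := crawler.LookupConfig(kvs)
	if err != nil {
		fmt.Println(err) // e.g. bitrotscan: must be 'on' or 'off'
		return
	}
	fmt.Println(cfg.Bitrot) // true
}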
@@ -26,9 +26,8 @@ import (
	"strings"
	"time"

-	"github.com/minio/minio-go/v7/pkg/set"
-
	"github.com/coredns/coredns/plugin/etcd/msg"
+	"github.com/minio/minio-go/v7/pkg/set"
	"go.etcd.io/etcd/v3/clientv3"
)
@@ -214,6 +213,11 @@ func (c *CoreDNS) DeleteRecord(record SrvRecord) error {
	return nil
}

+// String stringer name for this implementation of dns.Store
+func (c *CoreDNS) String() string {
+	return "etcdDNS"
+}
+
// CoreDNS - represents dns config for coredns server.
type CoreDNS struct {
	domainNames []string
@@ -223,13 +227,13 @@ type CoreDNS struct {
	etcdClient *clientv3.Client
}

-// Option - functional options pattern style
-type Option func(*CoreDNS)
+// EtcdOption - functional options pattern style
+type EtcdOption func(*CoreDNS)

// DomainNames set a list of domain names used by this CoreDNS
// client setting, note this will fail if set to empty when
// constructor initializes.
-func DomainNames(domainNames []string) Option {
+func DomainNames(domainNames []string) EtcdOption {
	return func(args *CoreDNS) {
		args.domainNames = domainNames
	}
@@ -237,14 +241,14 @@ func DomainNames(domainNames []string) Option {

// DomainIPs set a list of custom domain IPs, note this will
// fail if set to empty when constructor initializes.
-func DomainIPs(domainIPs set.StringSet) Option {
+func DomainIPs(domainIPs set.StringSet) EtcdOption {
	return func(args *CoreDNS) {
		args.domainIPs = domainIPs
	}
}

// DomainPort - is a string version of server port
-func DomainPort(domainPort string) Option {
+func DomainPort(domainPort string) EtcdOption {
	return func(args *CoreDNS) {
		args.domainPort = domainPort
	}
@@ -253,14 +257,14 @@ func DomainPort(domainPort string) Option {
// CoreDNSPath - custom prefix on etcd to populate DNS
// service records, optional and can be empty.
// if empty then c.prefixPath is used i.e "/skydns"
-func CoreDNSPath(prefix string) Option {
+func CoreDNSPath(prefix string) EtcdOption {
	return func(args *CoreDNS) {
		args.prefixPath = prefix
	}
}

// NewCoreDNS - initialize a new coreDNS set/unset values.
-func NewCoreDNS(cfg clientv3.Config, setters ...Option) (Store, error) {
+func NewCoreDNS(cfg clientv3.Config, setters ...EtcdOption) (Store, error) {
	etcdClient, err := clientv3.New(cfg)
	if err != nil {
		return nil, err
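The Option-to-EtcdOption rename exists only to free the generic name for the operator's OperatorOption below; both follow the functional-options pattern, in which setters are closures applied over the config struct during construction. A standalone sketch of the pattern with invented names:

package main

import "fmt"

type server struct {
	port   string
	prefix string
}

// option mutates a server during construction.
type option func(*server)

func withPort(p string) option   { return func(s *server) { s.port = p } }
func withPrefix(p string) option { return func(s *server) { s.prefix = p } }

func newServer(opts ...option) *server {
	s := &server{port: "9000", prefix: "/skydns"} // defaults
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	s := newServer(withPort("8000"))
	fmt.Println(s.port, s.prefix) // 8000 /skydns — unset options keep defaults
}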
cmd/config/dns/operator_dns.go (new file, 234 lines)
@@ -0,0 +1,234 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package dns

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/dgrijalva/jwt-go"
	"github.com/minio/minio/cmd/config"
	xhttp "github.com/minio/minio/cmd/http"
)

var (
	defaultOperatorContextTimeout = 10 * time.Second
	// ErrNotImplemented - Indicates the functionality which is not implemented
	ErrNotImplemented = errors.New("The method is not implemented")
)
func (c *OperatorDNS) addAuthHeader(r *http.Request) error {
	if c.username == "" || c.password == "" {
		return nil
	}

	claims := &jwt.StandardClaims{
		// ExpiresAt must be a Unix timestamp; expire the token 15 minutes out.
		ExpiresAt: time.Now().UTC().Add(15 * time.Minute).Unix(),
		Issuer:    c.username,
		Subject:   config.EnvDNSWebhook,
	}

	token := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)
	ss, err := token.SignedString([]byte(c.password))
	if err != nil {
		return err
	}

	r.Header.Set("Authorization", "Bearer "+ss)
	return nil
}
func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) {
	u, err := url.Parse(c.Endpoint)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Add("bucket", bucket)
	q.Add("delete", strconv.FormatBool(delete))
	u.RawQuery = q.Encode()
	return u.String(), nil
}
// Put - Adds DNS entries into operator webhook server
func (c *OperatorDNS) Put(bucket string) error {
	ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
	defer cancel()
	e, err := c.endpoint(bucket, false)
	if err != nil {
		return newError(bucket, err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
	if err != nil {
		return newError(bucket, err)
	}
	if err = c.addAuthHeader(req); err != nil {
		return newError(bucket, err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		// Request never reached the server: clean up any partial state and
		// report the failure. resp must not be used on this path.
		if derr := c.Delete(bucket); derr != nil {
			return newError(bucket, derr)
		}
		return newError(bucket, err)
	}
	var errorStringBuilder strings.Builder
	io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
	xhttp.DrainBody(resp.Body)
	if resp.StatusCode != http.StatusOK {
		errorString := errorStringBuilder.String()
		return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))
	}
	return nil
}
func newError(bucket string, err error) error {
	e := Error{bucket, err}
	if strings.Contains(err.Error(), "invalid bucket name") {
		return ErrInvalidBucketName(e)
	}
	return e
}
// Delete - Removes DNS entries added in Put().
func (c *OperatorDNS) Delete(bucket string) error {
	ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
	defer cancel()
	e, err := c.endpoint(bucket, true)
	if err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
	if err != nil {
		return err
	}
	if err = c.addAuthHeader(req); err != nil {
		return err
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	xhttp.DrainBody(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("request to delete the service for bucket %s, failed with status %s", bucket, resp.Status)
	}
	return nil
}
// DeleteRecord - Removes a specific DNS entry.
// No Op for Operator because the operator deals only with bucket entries.
func (c *OperatorDNS) DeleteRecord(record SrvRecord) error {
	return ErrNotImplemented
}

// Close closes the internal http client
func (c *OperatorDNS) Close() error {
	c.httpClient.CloseIdleConnections()
	return nil
}

// List - Retrieves list of DNS entries for the domain.
// This is a No Op for Operator because there is no intent to enforce a
// global namespace at the MinIO level with this DNS entry. The global
// namespace is enforced by the Kubernetes Operator.
func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {
	return nil, ErrNotImplemented
}

// Get - Retrieves DNS records for a bucket.
// This is a No Op for Operator because there is no intent to enforce a
// global namespace at the MinIO level with this DNS entry. The global
// namespace is enforced by the Kubernetes Operator.
func (c *OperatorDNS) Get(bucket string) (srvRecords []SrvRecord, err error) {
	return nil, ErrNotImplemented
}
// String stringer name for this implementation of dns.Store
func (c *OperatorDNS) String() string {
	return "webhookDNS"
}

// OperatorDNS - represents dns config for MinIO k8s operator.
type OperatorDNS struct {
	httpClient *http.Client
	Endpoint   string
	rootCAs    *x509.CertPool
	username   string
	password   string
}
// OperatorOption - functional options pattern style for OperatorDNS
type OperatorOption func(*OperatorDNS)

// Authentication - custom username and password for authenticating at the endpoint
func Authentication(username, password string) OperatorOption {
	return func(args *OperatorDNS) {
		args.username = username
		args.password = password
	}
}

// RootCAs - add custom trust certs pool
func RootCAs(CAs *x509.CertPool) OperatorOption {
	return func(args *OperatorDNS) {
		args.rootCAs = CAs
	}
}
// NewOperatorDNS - initialize a new K8S Operator DNS set/unset values.
func NewOperatorDNS(endpoint string, setters ...OperatorOption) (Store, error) {
	if endpoint == "" {
		return nil, errors.New("invalid argument")
	}

	args := &OperatorDNS{
		Endpoint: endpoint,
	}
	for _, setter := range setters {
		setter(args)
	}
	args.httpClient = &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   3 * time.Second,
				KeepAlive: 5 * time.Second,
			}).DialContext,
			ResponseHeaderTimeout: 3 * time.Second,
			TLSHandshakeTimeout:   3 * time.Second,
			ExpectContinueTimeout: 3 * time.Second,
			TLSClientConfig: &tls.Config{
				RootCAs: args.rootCAs,
			},
			// Go net/http automatically unzips the response if the
			// content-type is gzip; disable this feature, as we are
			// always interested in the raw stream.
			DisableCompression: true,
		},
	}
	return args, nil
}
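Wiring it together, a caller constructs the store with whatever options apply, mirroring the lookupConfigs hookup shown earlier. A hedged usage sketch (endpoint and credentials are placeholders):

package main

import (
	"log"

	"github.com/minio/minio/cmd/config/dns"
)

func main() {
	store, err := dns.NewOperatorDNS("https://operator.example.com:4222/webhook",
		dns.Authentication("console", "secret"))
	if err != nil {
		log.Fatalln(err)
	}
	defer store.Close()

	// Registers the bucket with the operator's webhook endpoint.
	if err := store.Put("mybucket"); err != nil {
		log.Println("bucket DNS entry not created:", err)
	}
}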
cmd/config/dns/store.go (new file, 44 lines)
@@ -0,0 +1,44 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package dns

// Error - DNS related error.
type Error struct {
	Bucket string
	Err    error
}

// ErrInvalidBucketName for buckets with invalid name
type ErrInvalidBucketName Error

func (e ErrInvalidBucketName) Error() string {
	return "invalid bucket name error: " + e.Err.Error()
}

func (e Error) Error() string {
	return "dns related error: " + e.Err.Error()
}

// Store dns record store
type Store interface {
	Put(bucket string) error
	Get(bucket string) ([]SrvRecord, error)
	Delete(bucket string) error
	List() (map[string][]SrvRecord, error)
	DeleteRecord(record SrvRecord) error
	Close() error
	String() string
}
@@ -19,7 +19,6 @@ package config
import (
	"errors"
	"fmt"
-	"io"
	"net"
	"syscall"

@@ -111,18 +110,16 @@ func ErrorToErr(err error) Err {
		case *net.OpError:
			return ErrPortAccess(err).Msg("Insufficient permissions to use specified port")
		}
-		return ErrNoPermissionsToAccessDirFiles(err).Msg("Insufficient permissions to access path")
-	} else if errors.Is(err, io.ErrUnexpectedEOF) {
-		return ErrUnexpectedDataContent(err)
-	} else {
-		// Failed to identify what type of error this is, return a simple UI error
-		return Err{msg: err.Error()}
	}

+	// Failed to identify what type of error this is, return a simple UI error
+	return Err{msg: err.Error()}
}
// FmtError converts a fatal error message to a more clear error
// using some colors
func FmtError(introMsg string, err error, jsonFlag bool) string {
	renderedTxt := ""
	uiErr := ErrorToErr(err)
	// JSON print
@@ -205,12 +205,6 @@ Example 1:
		`Use 'sudo setcap cap_net_bind_service=+ep /path/to/minio' to provide sufficient permissions`,
	)

-	ErrNoPermissionsToAccessDirFiles = newErrFn(
-		"Missing permissions to access the specified path",
-		"Please ensure the specified path can be accessed",
-		"",
-	)
-
	ErrSSLUnexpectedError = newErrFn(
		"Invalid TLS certificate",
		"Please check the content of your certificate data",
@@ -247,12 +241,6 @@ Example 1:
		"",
	)

-	ErrUnexpectedDataContent = newErrFn(
-		"Unexpected data content",
-		"Please contact MinIO at https://slack.min.io",
-		"",
-	)
-
	ErrUnexpectedError = newErrFn(
		"Unexpected error",
		"Please contact MinIO at https://slack.min.io",
@@ -25,6 +25,7 @@ import (
	"net/http"
	"strconv"
	"strings"
+	"sync"
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
@@ -49,10 +50,14 @@ type Config struct {
	publicKeys  map[string]crypto.PublicKey
	transport   *http.Transport
	closeRespFn func(io.ReadCloser)
+	mutex       *sync.Mutex
}

// PopulatePublicKey - populates a new publickey from the JWKS URL.
func (r *Config) PopulatePublicKey() error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+
	if r.JWKS.URL == nil || r.JWKS.URL.String() == "" {
		return nil
	}
@@ -185,7 +190,15 @@ func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
	var claims jwtgo.MapClaims
	jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback)
	if err != nil {
-		return nil, err
+		// Re-populate the public key in case the JWKS
+		// pubkeys are refreshed
+		if err = p.PopulatePublicKey(); err != nil {
+			return nil, err
+		}
+		jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback)
+		if err != nil {
+			return nil, err
+		}
	}

	if !jwtToken.Valid {
@@ -317,6 +330,7 @@ func LookupConfig(kvs config.KVS, transport *http.Transport, closeRespFn func(io
		ClientID:    env.Get(EnvIdentityOpenIDClientID, kvs.Get(ClientID)),
		transport:   transport,
		closeRespFn: closeRespFn,
+		mutex:       &sync.Mutex{}, // allocate for copying
	}

	configURL := env.Get(EnvIdentityOpenIDURL, kvs.Get(ConfigURL))
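The Validate change implements refresh-and-retry: a failed parse is no longer terminal, because the provider's JWKS keys may simply have rotated, so the keys are re-fetched once and the parse retried. A generic standalone sketch of that pattern (all names invented):

package main

import (
	"errors"
	"fmt"
)

// verify retries exactly once after refreshing cached keys,
// mirroring the JWKS rotation handling above.
func verify(token string, parse func(string) error, refresh func() error) error {
	if err := parse(token); err == nil {
		return nil
	}
	if err := refresh(); err != nil {
		return err
	}
	return parse(token)
}

func main() {
	stale := true
	parse := func(string) error {
		if stale {
			return errors.New("signature check failed: unknown key id")
		}
		return nil
	}
	refresh := func() error { stale = false; return nil }

	fmt.Println(verify("jwt...", parse, refresh)) // <nil> after one refresh
}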
@@ -20,6 +20,7 @@ import (
	"crypto"
	"encoding/json"
	"net/url"
+	"sync"
	"testing"
	"time"

@@ -89,6 +90,7 @@ func TestJWTAzureFail(t *testing.T) {
	}

	cfg := Config{}
+	cfg.mutex = &sync.Mutex{}
	cfg.JWKS.URL = u1
	cfg.publicKeys = keys
	jwt := NewJWT(cfg)
@@ -136,6 +138,7 @@ func TestJWT(t *testing.T) {
	}

	cfg := Config{}
+	cfg.mutex = &sync.Mutex{}
	cfg.JWKS.URL = u1
	cfg.publicKeys = keys
	jwt := NewJWT(cfg)
@@ -14,14 +14,14 @@
 * limitations under the License.
 */

-package dns
+package config

-// Store dns record store
-type Store interface {
-	Put(bucket string) error
-	Get(bucket string) ([]SrvRecord, error)
-	Delete(bucket string) error
-	List() (map[string][]SrvRecord, error)
-	DeleteRecord(record SrvRecord) error
-	Close() error
-}
+import "context"
+
+// Logger contains injected logger methods.
+var Logger = struct {
+	Info  func(msg string, data ...interface{})
+	LogIf func(ctx context.Context, err error, errKind ...interface{})
+}{
+	// Initialized via injection.
+}
@@ -340,6 +340,12 @@ var (
			Optional:    true,
			Type:        "sentence",
		},
+		config.HelpKV{
+			Key:         target.PostgresMaxOpenConnections,
+			Description: "To set the maximum number of open connections to the database. The value is set to `2` by default.",
+			Optional:    true,
+			Type:        "number",
+		},
	}

	HelpMySQL = config.HelpKVS{
@@ -377,6 +383,12 @@ var (
			Optional:    true,
			Type:        "sentence",
		},
+		config.HelpKV{
+			Key:         target.MySQLMaxOpenConnections,
+			Description: "To set the maximum number of open connections to the database. The value is set to `2` by default.",
+			Optional:    true,
+			Type:        "number",
+		},
	}
	HelpNATS = config.HelpKVS{
@@ -559,6 +571,18 @@ var (
			Optional:    true,
			Type:        "number",
		},
+		config.HelpKV{
+			Key:         target.ElasticUsername,
+			Description: "username for Elasticsearch basic-auth",
+			Optional:    true,
+			Type:        "string",
+		},
+		config.HelpKV{
+			Key:         target.ElasticPassword,
+			Description: "password for Elasticsearch basic-auth",
+			Optional:    true,
+			Type:        "string",
+		},
		config.HelpKV{
			Key:         config.Comment,
			Description: config.DefaultComment,
@@ -201,6 +201,14 @@ func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) e
			Key:   target.ElasticQueueLimit,
			Value: strconv.Itoa(int(cfg.QueueLimit)),
		},
+		config.KV{
+			Key:   target.ElasticUsername,
+			Value: cfg.Username,
+		},
+		config.KV{
+			Key:   target.ElasticPassword,
+			Value: cfg.Password,
+		},
	}

	return nil
@@ -349,6 +357,10 @@ func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArg
			Key:   target.PostgresQueueLimit,
			Value: strconv.Itoa(int(cfg.QueueLimit)),
		},
+		config.KV{
+			Key:   target.PostgresMaxOpenConnections,
+			Value: strconv.Itoa(cfg.MaxOpenConnections),
+		},
	}

	return nil
@@ -546,6 +558,10 @@ func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error
			Key:   target.MySQLQueueLimit,
			Value: strconv.Itoa(int(cfg.QueueLimit)),
		},
+		config.KV{
+			Key:   target.MySQLMaxOpenConnections,
+			Value: strconv.Itoa(cfg.MaxOpenConnections),
+		},
	}

	return nil
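The new max_open_connections key ultimately caps the database pool through database/sql; the effect is the standard SetMaxOpenConns behavior. A hedged standalone sketch of what the notification target does with the value (driver and DSN are placeholders):

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // placeholder Postgres driver import
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/minio_events?sslmode=disable")
	if err != nil {
		log.Fatalln(err)
	}
	defer db.Close()

	// Cap concurrent connections the way the notify target applies
	// max_open_connections (default 2); further queries queue.
	db.SetMaxOpenConns(2)
}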
@@ -43,11 +43,10 @@ var ErrTargetsOffline = errors.New("one or more targets are offline. Please use

// TestNotificationTargets is similar to GetNotificationTargets()
// avoids explicit registration.
-func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport,
-	targetIDs []event.TargetID) error {
+func TestNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, targetIDs []event.TargetID) error {
	test := true
	returnOnTargetError := true
-	targets, err := RegisterNotificationTargets(cfg, doneCh, transport, targetIDs, test, returnOnTargetError)
+	targets, err := RegisterNotificationTargets(ctx, cfg, transport, targetIDs, test, returnOnTargetError)
	if err == nil {
		// Close all targets since we are only testing connections.
		for _, t := range targets.TargetMap() {
@@ -60,9 +59,9 @@ func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transpor

// GetNotificationTargets registers and initializes all notification
// targets, returns error if any.
-func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool) (*event.TargetList, error) {
+func GetNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, test bool) (*event.TargetList, error) {
	returnOnTargetError := false
-	return RegisterNotificationTargets(cfg, doneCh, transport, nil, test, returnOnTargetError)
+	return RegisterNotificationTargets(ctx, cfg, transport, nil, test, returnOnTargetError)
}

// RegisterNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
@@ -70,8 +69,8 @@ func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport
// * Add a new target in pkg/event/target package.
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
// * Handle the configuration in this function to create/add into TargetList.
-func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
-	targetList, err := FetchRegisteredTargets(cfg, doneCh, transport, test, returnOnTargetError)
+func RegisterNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
+	targetList, err := FetchRegisteredTargets(ctx, cfg, transport, test, returnOnTargetError)
	if err != nil {
		return targetList, err
	}
@@ -94,7 +93,7 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
// FetchRegisteredTargets - Returns a set of configured TargetList.
// If `returnOnTargetError` is set to true, the function returns when a target initialization fails.
// Else, the function will return a complete TargetList irrespective of errors.
-func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
+func FetchRegisteredTargets(ctx context.Context, cfg config.Config, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
	targetList := event.NewTargetList()
	var targetsOffline bool

@@ -118,7 +117,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		return nil, err
	}

-	esTargets, err := GetNotifyES(cfg[config.NotifyESSubSys])
+	esTargets, err := GetNotifyES(cfg[config.NotifyESSubSys], transport)
	if err != nil {
		return nil, err
	}
@@ -167,7 +166,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewAMQPTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewAMQPTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -188,7 +187,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewElasticsearchTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewElasticsearchTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -209,7 +208,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
			continue
		}
		args.TLS.RootCAs = transport.TLSClientConfig.RootCAs
-		newTarget, err := target.NewKafkaTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewKafkaTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -230,7 +229,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
			continue
		}
		args.RootCAs = transport.TLSClientConfig.RootCAs
-		newTarget, err := target.NewMQTTTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewMQTTTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -250,7 +249,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewMySQLTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewMySQLTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -270,7 +269,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewNATSTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewNATSTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -290,7 +289,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewNSQTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewNSQTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -310,7 +309,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewPostgreSQLTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewPostgreSQLTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -330,7 +329,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewRedisTarget(id, args, doneCh, logger.LogOnceIf, test)
+		newTarget, err := target.NewRedisTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -350,7 +349,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
		if !args.Enable {
			continue
		}
-		newTarget, err := target.NewWebhookTarget(id, args, doneCh, logger.LogOnceIf, transport, test)
+		newTarget, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport, test)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
@@ -808,6 +807,10 @@ var (
|
||||
Key: target.MySQLQueueLimit,
|
||||
Value: "0",
|
||||
},
|
||||
config.KV{
|
||||
Key: target.MySQLMaxOpenConnections,
|
||||
Value: "2",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -856,13 +859,25 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
|
||||
if k != config.Default {
|
||||
queueDirEnv = queueDirEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnectionsEnv := target.EnvMySQLMaxOpenConnections
|
||||
if k != config.Default {
|
||||
maxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.MySQLMaxOpenConnections)))
|
||||
if cErr != nil {
|
||||
return nil, cErr
|
||||
}
|
||||
|
||||
mysqlArgs := target.MySQLArgs{
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.MySQLFormat)),
|
||||
DSN: env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.MySQLTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
|
||||
QueueLimit: queueLimit,
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.MySQLFormat)),
|
||||
DSN: env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.MySQLTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
|
||||
QueueLimit: queueLimit,
|
||||
MaxOpenConnections: maxOpenConnections,
|
||||
}
|
||||
if err = mysqlArgs.Validate(); err != nil {
|
||||
return nil, err
|
||||
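The hunks above give the MySQL notification target a max_open_connections knob (default "2"), resolved env-first like every other key in this file. A rough sketch of how such a value maps onto database/sql; the env variable name and DSN here are illustrative assumptions, not taken from the source:

package main

import (
	"database/sql"
	"fmt"
	"os"
	"strconv"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// env.Get-style resolution: the env variable wins over the stored KV
	// default of "2". (Variable name is an assumption for illustration.)
	v := os.Getenv("MINIO_NOTIFY_MYSQL_MAX_OPEN_CONNECTIONS")
	if v == "" {
		v = "2"
	}
	maxOpen, err := strconv.Atoi(v)
	if err != nil {
		fmt.Println("invalid max_open_connections:", err)
		return
	}

	// Hypothetical DSN; sql.Open does not dial until first use.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/minio_events")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()
	db.SetMaxOpenConns(maxOpen) // cap concurrent connections to the target DB
	fmt.Println("pool capped at", maxOpen, "open connections")
}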
@@ -1236,6 +1251,10 @@ var (
|
||||
Key: target.PostgresQueueLimit,
|
||||
Value: "0",
|
||||
},
|
||||
config.KV{
|
||||
Key: target.PostgresMaxOpenConnections,
|
||||
Value: "2",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1286,13 +1305,24 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
|
||||
queueDirEnv = queueDirEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnectionsEnv := target.EnvPostgresMaxOpenConnections
|
||||
if k != config.Default {
|
||||
maxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k
|
||||
}
|
||||
|
||||
maxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.PostgresMaxOpenConnections)))
|
||||
if cErr != nil {
|
||||
return nil, cErr
|
||||
}
|
||||
|
||||
psqlArgs := target.PostgreSQLArgs{
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.PostgresFormat)),
|
||||
ConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.PostgresTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
|
||||
QueueLimit: uint64(queueLimit),
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.PostgresFormat)),
|
||||
ConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
|
||||
Table: env.Get(tableEnv, kv.Get(target.PostgresTable)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
|
||||
QueueLimit: uint64(queueLimit),
|
||||
MaxOpenConnections: maxOpenConnections,
|
||||
}
|
||||
if err = psqlArgs.Validate(); err != nil {
|
||||
return nil, err
|
||||
@@ -1532,11 +1562,19 @@ var (
|
||||
Key: target.ElasticQueueLimit,
|
||||
Value: "0",
|
||||
},
|
||||
config.KV{
|
||||
Key: target.ElasticUsername,
|
||||
Value: "",
|
||||
},
|
||||
config.KV{
|
||||
Key: target.ElasticPassword,
|
||||
Value: "",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// GetNotifyES - returns a map of registered notification 'elasticsearch' targets
|
||||
func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchArgs, error) {
|
||||
func GetNotifyES(esKVS map[string]config.KVS, transport *http.Transport) (map[string]target.ElasticsearchArgs, error) {
|
||||
esTargets := make(map[string]target.ElasticsearchArgs)
|
||||
for k, kv := range mergeTargets(esKVS, target.EnvElasticEnable, DefaultESKVS) {
|
||||
enableEnv := target.EnvElasticEnable
|
||||
@@ -1586,6 +1624,16 @@ func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchAr
|
||||
queueDirEnv = queueDirEnv + config.Default + k
|
||||
}
|
||||
|
||||
usernameEnv := target.EnvElasticUsername
|
||||
if k != config.Default {
|
||||
usernameEnv = usernameEnv + config.Default + k
|
||||
}
|
||||
|
||||
passwordEnv := target.EnvElasticPassword
|
||||
if k != config.Default {
|
||||
passwordEnv = passwordEnv + config.Default + k
|
||||
}
|
||||
|
||||
esArgs := target.ElasticsearchArgs{
|
||||
Enable: enabled,
|
||||
Format: env.Get(formatEnv, kv.Get(target.ElasticFormat)),
|
||||
@@ -1593,6 +1641,9 @@ func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchAr
|
||||
Index: env.Get(indexEnv, kv.Get(target.ElasticIndex)),
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.ElasticQueueDir)),
|
||||
QueueLimit: uint64(queueLimit),
|
||||
Transport: transport,
|
||||
Username: env.Get(usernameEnv, kv.Get(target.ElasticUsername)),
|
||||
Password: env.Get(passwordEnv, kv.Get(target.ElasticPassword)),
|
||||
}
|
||||
if err = esArgs.Validate(); err != nil {
|
||||
return nil, err
|
||||
|
||||
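The Elasticsearch target gains username/password keys and a shared transport. Both new keys follow this file's env-overrides-config convention; a tiny sketch of that lookup order, with assumed (not confirmed) variable names:

package main

import (
	"fmt"
	"os"
)

// getConfig mirrors env.Get(envKey, kvValue) as used above: a non-empty
// environment variable overrides the stored config value.
func getConfig(envKey, kvValue string) string {
	if v, ok := os.LookupEnv(envKey); ok && v != "" {
		return v
	}
	return kvValue
}

func main() {
	os.Setenv("MINIO_NOTIFY_ELASTICSEARCH_USERNAME", "search-user")
	// Env set -> env wins; env unset -> the config KV value is returned.
	fmt.Println(getConfig("MINIO_NOTIFY_ELASTICSEARCH_USERNAME", ""))          // search-user
	fmt.Println(getConfig("MINIO_NOTIFY_ELASTICSEARCH_PASSWORD", "kv-secret")) // kv-secret
}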
@@ -156,7 +156,7 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
|
||||
}
|
||||
|
||||
// Validates the parity disks.
|
||||
func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
|
||||
func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
|
||||
if ssParity == 0 && rrsParity == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -174,12 +174,12 @@ func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
|
||||
return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
|
||||
}
|
||||
|
||||
if ssParity > drivesPerSet/2 {
|
||||
return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2)
|
||||
if ssParity > setDriveCount/2 {
|
||||
return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
|
||||
}
|
||||
|
||||
if rrsParity > drivesPerSet/2 {
|
||||
return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2)
|
||||
if rrsParity > setDriveCount/2 {
|
||||
return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
|
||||
}
|
||||
|
||||
if ssParity > 0 && rrsParity > 0 {
|
||||
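The rename from drivesPerSet to setDriveCount does not change the rule being enforced: parity for either storage class may consume at most half the drives in an erasure set. A standalone sketch of that invariant (hypothetical helper, simplified from the function above):

package main

import "fmt"

// validateParitySketch mirrors the constraint enforced above: parity for a
// storage class may use at most half of the drives in an erasure set.
func validateParitySketch(ssParity, rrsParity, setDriveCount int) error {
	if ssParity > setDriveCount/2 {
		return fmt.Errorf("standard parity %d exceeds %d", ssParity, setDriveCount/2)
	}
	if rrsParity > setDriveCount/2 {
		return fmt.Errorf("reduced redundancy parity %d exceeds %d", rrsParity, setDriveCount/2)
	}
	return nil
}

func main() {
	fmt.Println(validateParitySketch(4, 2, 16)) // <nil>
	fmt.Println(validateParitySketch(9, 2, 16)) // error: 9 > 8
}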
@@ -220,13 +220,13 @@ func Enabled(kvs config.KVS) bool {
|
||||
}
|
||||
|
||||
// LookupConfig - lookup storage class config and override with valid environment settings if any.
|
||||
func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
|
||||
func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
|
||||
cfg = Config{}
|
||||
cfg.Standard.Parity = drivesPerSet / 2
|
||||
cfg.Standard.Parity = setDriveCount / 2
|
||||
cfg.RRS.Parity = defaultRRSParity
|
||||
|
||||
if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
ssc := env.Get(StandardEnv, kvs.Get(ClassStandard))
|
||||
@@ -235,17 +235,17 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
|
||||
if ssc != "" {
|
||||
cfg.Standard, err = parseStorageClass(ssc)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
return Config{}, err
|
||||
}
|
||||
}
|
||||
if cfg.Standard.Parity == 0 {
|
||||
cfg.Standard.Parity = drivesPerSet / 2
|
||||
cfg.Standard.Parity = setDriveCount / 2
|
||||
}
|
||||
|
||||
if rrsc != "" {
|
||||
cfg.RRS, err = parseStorageClass(rrsc)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
return Config{}, err
|
||||
}
|
||||
}
|
||||
if cfg.RRS.Parity == 0 {
|
||||
@@ -254,8 +254,8 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
|
||||
|
||||
// Validation is done after parsing both the storage classes. This is needed because we need one
|
||||
// storage class value to deduce the correct value of the other storage class.
|
||||
if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil {
|
||||
return cfg, err
|
||||
if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
|
||||
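LookupConfig reads both classes via env.Get in the usual 'EC:<parity>' notation before validating them together. A rough sketch of that parse, assuming the EC: prefix format; the real parseStorageClass also validates the scheme and bounds:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseEC sketches the "EC:<parity>" format consumed by parseStorageClass
// above (simplified; bounds checking happens later in validateParity).
func parseEC(s string) (int, error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 || parts[0] != "EC" {
		return 0, fmt.Errorf("unsupported storage class format: %s", s)
	}
	return strconv.Atoi(parts[1])
}

func main() {
	parity, err := parseEC("EC:4")
	fmt.Println(parity, err) // 4 <nil>
}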
@@ -69,10 +69,10 @@ func TestParseStorageClass(t *testing.T) {
|
||||
|
||||
func TestValidateParity(t *testing.T) {
|
||||
tests := []struct {
|
||||
rrsParity int
|
||||
ssParity int
|
||||
success bool
|
||||
drivesPerSet int
|
||||
rrsParity int
|
||||
ssParity int
|
||||
success bool
|
||||
setDriveCount int
|
||||
}{
|
||||
{2, 4, true, 16},
|
||||
{3, 3, true, 16},
|
||||
@@ -85,7 +85,7 @@ func TestValidateParity(t *testing.T) {
|
||||
{9, 2, false, 16},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
err := validateParity(tt.ssParity, tt.rrsParity, tt.drivesPerSet)
|
||||
err := validateParity(tt.ssParity, tt.rrsParity, tt.setDriveCount)
|
||||
if err != nil && tt.success {
|
||||
t.Errorf("Test %d, Expected success, got %s", i+1, err)
|
||||
}
|
||||
|
||||
@@ -117,6 +117,39 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh <-chan
|
||||
sys.pubsub.Subscribe(subCh, doneCh, filter)
|
||||
}
|
||||
|
||||
// Validate checks if HTTPConsoleLoggerSys is valid; it always returns nil for now
|
||||
func (sys *HTTPConsoleLoggerSys) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoint - dummy function for interface compatibility
|
||||
func (sys *HTTPConsoleLoggerSys) Endpoint() string {
|
||||
return sys.console.Endpoint()
|
||||
}
|
||||
|
||||
// String - stringer function for interface compatibility
|
||||
func (sys *HTTPConsoleLoggerSys) String() string {
|
||||
return "console+http"
|
||||
}
|
||||
|
||||
// Content returns the console stdout log
|
||||
func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
|
||||
sys.RLock()
|
||||
sys.logBuf.Do(func(p interface{}) {
|
||||
if p != nil {
|
||||
lg, ok := p.(log.Info)
|
||||
if ok {
|
||||
if (lg.Entry != log.Entry{}) {
|
||||
logs = append(logs, lg.Entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
sys.RUnlock()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Send log message 'e' to console and publish to console
|
||||
// log pubsub system
|
||||
func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
|
||||
|
||||
@@ -16,11 +16,14 @@ package crypto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/ellipses"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
@@ -167,7 +170,8 @@ const (
|
||||
|
||||
const (
|
||||
// EnvKMSKesEndpoint is the environment variable used to specify
|
||||
// the kes server HTTPS endpoint.
|
||||
// one or multiple KES server HTTPS endpoints. The individual
|
||||
// endpoints should be separated by ','.
|
||||
EnvKMSKesEndpoint = "MINIO_KMS_KES_ENDPOINT"
|
||||
|
||||
// EnvKMSKesKeyFile is the environment variable used to specify
|
||||
@@ -216,16 +220,36 @@ func LookupKesConfig(kvs config.KVS) (KesConfig, error) {
|
||||
kesCfg := KesConfig{}
|
||||
|
||||
endpointStr := env.Get(EnvKMSKesEndpoint, kvs.Get(KMSKesEndpoint))
|
||||
if endpointStr != "" {
|
||||
// Lookup kes configuration & overwrite config entry if ENV var is present
|
||||
endpoint, err := xnet.ParseHTTPURL(endpointStr)
|
||||
var endpoints []string
|
||||
for _, endpoint := range strings.Split(endpointStr, ",") {
|
||||
if strings.TrimSpace(endpoint) == "" {
|
||||
continue
|
||||
}
|
||||
if !ellipses.HasEllipses(endpoint) {
|
||||
endpoints = append(endpoints, endpoint)
|
||||
continue
|
||||
}
|
||||
pattern, err := ellipses.FindEllipsesPatterns(endpoint)
|
||||
if err != nil {
|
||||
return kesCfg, err
|
||||
}
|
||||
endpointStr = endpoint.String()
|
||||
for _, p := range pattern {
|
||||
endpoints = append(endpoints, p.Expand()...)
|
||||
}
|
||||
}
|
||||
if len(endpoints) == 0 {
|
||||
return kesCfg, nil
|
||||
}
|
||||
|
||||
kesCfg.Endpoint = endpointStr
|
||||
randNum := rand.Intn(len(endpoints) + 1) // We add 1 b/c len(endpoints) may be 0: See: rand.Intn docs
|
||||
kesCfg.Endpoint = make([]string, len(endpoints))
|
||||
for i, endpoint := range endpoints {
|
||||
endpoint, err := xnet.ParseHTTPURL(endpoint)
|
||||
if err != nil {
|
||||
return kesCfg, err
|
||||
}
|
||||
kesCfg.Endpoint[(randNum+i)%len(endpoints)] = endpoint.String()
|
||||
}
|
||||
kesCfg.KeyFile = env.Get(EnvKMSKesKeyFile, kvs.Get(KMSKesKeyFile))
|
||||
kesCfg.CertFile = env.Get(EnvKMSKesCertFile, kvs.Get(KMSKesCertFile))
|
||||
kesCfg.CAPath = env.Get(EnvKMSKesCAPath, kvs.Get(KMSKesCAPath))
|
||||
|
||||
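MINIO_KMS_KES_ENDPOINT now accepts multiple comma-separated endpoints, optionally written as ellipses patterns. A sketch mirroring the expansion calls in the hunk above; the endpoint values are made up:

package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio/pkg/ellipses"
)

func main() {
	// Made-up value: one literal endpoint plus an ellipses pattern.
	endpointStr := "https://kes1.local:7373,https://kes{2...4}.local:7373"

	var endpoints []string
	for _, endpoint := range strings.Split(endpointStr, ",") {
		if strings.TrimSpace(endpoint) == "" {
			continue
		}
		if !ellipses.HasEllipses(endpoint) {
			endpoints = append(endpoints, endpoint)
			continue
		}
		pattern, err := ellipses.FindEllipsesPatterns(endpoint)
		if err != nil {
			fmt.Println(err)
			return
		}
		for _, p := range pattern {
			endpoints = append(endpoints, p.Expand()...)
		}
	}
	fmt.Println(len(endpoints), endpoints) // 4 endpoints after expansion
}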
@@ -18,9 +18,11 @@ import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
)
|
||||
|
||||
// SSEHeader is the general AWS SSE HTTP header key.
|
||||
@@ -81,6 +83,8 @@ const (
|
||||
func RemoveSensitiveHeaders(h http.Header) {
|
||||
h.Del(SSECKey)
|
||||
h.Del(SSECopyKey)
|
||||
h.Del(xhttp.AmzMetaUnencryptedContentLength)
|
||||
h.Del(xhttp.AmzMetaUnencryptedContentMD5)
|
||||
}
|
||||
|
||||
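The two added deletions close the metadata-spoofing vector referenced by the advisory in the test data further down. Note that http.Header canonicalizes keys, so the exact client casing does not matter; a quick stdlib demonstration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	// Client-supplied casing is irrelevant: textproto canonicalizes keys.
	h.Set("x-amz-meta-x-amz-unencrypted-content-md5", "spoofed")
	h.Del("X-Amz-Meta-X-Amz-Unencrypted-Content-Md5")
	fmt.Println(len(h)) // 0 - the sensitive key is gone
}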
// IsRequested returns true if the HTTP headers indicates
|
||||
@@ -144,6 +148,7 @@ func (s3KMS) ParseHTTP(h http.Header) (string, interface{}, error) {
|
||||
contextStr, ok := h[SSEKmsContext]
|
||||
if ok {
|
||||
var context map[string]interface{}
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
if err := json.Unmarshal([]byte(contextStr[0]), &context); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
@@ -457,6 +457,16 @@ var removeSensitiveHeadersTests = []struct {
|
||||
"X-Amz-Meta-Test-1": []string{"Test-1"},
|
||||
},
|
||||
},
|
||||
{ // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
Header: http.Header{
|
||||
"X-Amz-Meta-X-Amz-Unencrypted-Content-Md5": []string{"value"},
|
||||
"X-Amz-Meta-X-Amz-Unencrypted-Content-Length": []string{"value"},
|
||||
"X-Amz-Meta-Test-1": []string{"Test-1"},
|
||||
},
|
||||
ExpectedHeader: http.Header{
|
||||
"X-Amz-Meta-Test-1": []string{"Test-1"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestRemoveSensitiveHeaders(t *testing.T) {
|
||||
|
||||
@@ -16,9 +16,9 @@ package crypto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -30,10 +30,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
// ErrKESKeyExists is the error returned by a KES server
|
||||
// when a master key already exists.
|
||||
var ErrKESKeyExists = NewKESError(http.StatusBadRequest, "key does already exist")
|
||||
@@ -43,8 +46,8 @@ var ErrKESKeyExists = NewKESError(http.StatusBadRequest, "key does already exist
|
||||
type KesConfig struct {
|
||||
Enabled bool
|
||||
|
||||
// The kes server endpoint.
|
||||
Endpoint string
|
||||
// The KES server endpoints.
|
||||
Endpoint []string
|
||||
|
||||
// The path to the TLS private key used
|
||||
// by MinIO to authenticate to the kes
|
||||
@@ -83,7 +86,7 @@ type KesConfig struct {
|
||||
// Verify verifies if the kes configuration is correct
|
||||
func (k KesConfig) Verify() (err error) {
|
||||
switch {
|
||||
case k.Endpoint == "":
|
||||
case len(k.Endpoint) == 0:
|
||||
err = Errorf("crypto: missing kes endpoint")
|
||||
case k.CertFile == "":
|
||||
err = Errorf("crypto: missing cert file")
|
||||
@@ -98,7 +101,7 @@ func (k KesConfig) Verify() (err error) {
|
||||
type kesService struct {
|
||||
client *kesClient
|
||||
|
||||
endpoint string
|
||||
endpoints []string
|
||||
defaultKeyID string
|
||||
}
|
||||
|
||||
@@ -113,23 +116,37 @@ func NewKes(cfg KesConfig) (KMS, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certPool, err := loadCACertificates(cfg.CAPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if cfg.Transport.TLSClientConfig != nil {
|
||||
if err = loadCACertificates(cfg.CAPath,
|
||||
cfg.Transport.TLSClientConfig.RootCAs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
rootCAs, _ := x509.SystemCertPool()
|
||||
if rootCAs == nil {
|
||||
// In some systems (like Windows) system cert pool is
|
||||
// not supported or no certificates are present on the
|
||||
// system - so we create a new cert pool.
|
||||
rootCAs = x509.NewCertPool()
|
||||
}
|
||||
if err = loadCACertificates(cfg.CAPath, rootCAs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.Transport.TLSClientConfig = &tls.Config{
|
||||
RootCAs: rootCAs,
|
||||
}
|
||||
}
|
||||
cfg.Transport.TLSClientConfig = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: certPool,
|
||||
}
|
||||
cfg.Transport.ForceAttemptHTTP2 = true
|
||||
cfg.Transport.TLSClientConfig.Certificates = []tls.Certificate{cert}
|
||||
cfg.Transport.TLSClientConfig.NextProtos = []string{"h2"}
|
||||
|
||||
return &kesService{
|
||||
client: &kesClient{
|
||||
addr: cfg.Endpoint,
|
||||
endpoints: cfg.Endpoint,
|
||||
httpClient: http.Client{
|
||||
Transport: cfg.Transport,
|
||||
},
|
||||
},
|
||||
endpoint: cfg.Endpoint,
|
||||
endpoints: cfg.Endpoint,
|
||||
defaultKeyID: cfg.DefaultKeyID,
|
||||
}, nil
|
||||
}
|
||||
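NewKes now prefers the RootCAs pool already attached to the caller's transport and only falls back to the system pool, or a fresh pool where none exists (e.g. on Windows). A condensed sketch of that fallback order:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// rootPool sketches the fallback above: prefer an existing pool, else the
// system pool, else a fresh one where the system pool is unavailable.
func rootPool(existing *tls.Config) *x509.CertPool {
	if existing != nil && existing.RootCAs != nil {
		return existing.RootCAs
	}
	pool, _ := x509.SystemCertPool()
	if pool == nil {
		pool = x509.NewCertPool()
	}
	return pool
}

func main() {
	cfg := &tls.Config{RootCAs: x509.NewCertPool()}
	fmt.Println(rootPool(cfg) == cfg.RootCAs) // true - caller's pool is reused
	fmt.Println(rootPool(nil) != nil)         // true - fallback pool created
}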
@@ -146,9 +163,9 @@ func (kes *kesService) DefaultKeyID() string {
|
||||
// method.
|
||||
func (kes *kesService) Info() KMSInfo {
|
||||
return KMSInfo{
|
||||
Endpoint: kes.endpoint,
|
||||
Name: kes.DefaultKeyID(),
|
||||
AuthType: "TLS",
|
||||
Endpoints: kes.endpoints,
|
||||
Name: kes.DefaultKeyID(),
|
||||
AuthType: "TLS",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,7 +221,7 @@ func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
|
||||
// • GenerateDataKey (API: /v1/key/generate/)
|
||||
// • DecryptDataKey (API: /v1/key/decrypt/)
|
||||
type kesClient struct {
|
||||
addr string
|
||||
endpoints []string
|
||||
httpClient http.Client
|
||||
}
|
||||
|
||||
@@ -215,8 +232,8 @@ type kesClient struct {
|
||||
// application does not have the cryptographic key at
|
||||
// any point in time.
|
||||
func (c *kesClient) CreateKey(name string) error {
|
||||
url := fmt.Sprintf("%s/v1/key/create/%s", c.addr, url.PathEscape(name))
|
||||
_, err := c.postRetry(url, nil, 0) // No request body and no response expected
|
||||
path := fmt.Sprintf("/v1/key/create/%s", url.PathEscape(name))
|
||||
_, err := c.postRetry(path, nil, 0) // No request body and no response expected
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -248,8 +265,8 @@ func (c *kesClient) GenerateDataKey(name string, context []byte) ([]byte, []byte
|
||||
}
|
||||
|
||||
const limit = 1 << 20 // A plaintext/ciphertext key pair will never be larger than 1 MiB
|
||||
url := fmt.Sprintf("%s/v1/key/generate/%s", c.addr, url.PathEscape(name))
|
||||
resp, err := c.postRetry(url, bytes.NewReader(body), limit)
|
||||
path := fmt.Sprintf("/v1/key/generate/%s", url.PathEscape(name))
|
||||
resp, err := c.postRetry(path, bytes.NewReader(body), limit)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -285,8 +302,8 @@ func (c *kesClient) DecryptDataKey(name string, ciphertext, context []byte) ([]b
|
||||
}
|
||||
|
||||
const limit = 1 << 20 // A data key will never be larger than 1 MiB
|
||||
url := fmt.Sprintf("%s/v1/key/decrypt/%s", c.addr, url.PathEscape(name))
|
||||
resp, err := c.postRetry(url, bytes.NewReader(body), limit)
|
||||
path := fmt.Sprintf("/v1/key/decrypt/%s", url.PathEscape(name))
|
||||
resp, err := c.postRetry(path, bytes.NewReader(body), limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -357,7 +374,16 @@ func parseErrorResponse(resp *http.Response) error {
|
||||
}
|
||||
|
||||
func (c *kesClient) post(url string, body io.Reader, limit int64) (io.Reader, error) {
|
||||
resp, err := c.httpClient.Post(url, "application/json", body)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -376,26 +402,34 @@ func (c *kesClient) post(url string, body io.Reader, limit int64) (io.Reader, er
|
||||
return &respBody, nil
|
||||
}
|
||||
|
||||
func (c *kesClient) postRetry(url string, body io.ReadSeeker, limit int64) (io.Reader, error) {
|
||||
func (c *kesClient) postRetry(path string, body io.ReadSeeker, limit int64) (io.Reader, error) {
|
||||
retryMax := 1 + len(c.endpoints)
|
||||
for i := 0; ; i++ {
|
||||
if body != nil {
|
||||
body.Seek(0, io.SeekStart) // seek to the beginning of the body.
|
||||
}
|
||||
response, err := c.post(url, body, limit)
|
||||
|
||||
response, err := c.post(c.endpoints[i%len(c.endpoints)]+path, body, limit)
|
||||
if err == nil {
|
||||
return response, nil
|
||||
}
|
||||
|
||||
if !xnet.IsNetworkOrHostDown(err) && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
// If the error is not temporary / retryable => fail the request immediately.
|
||||
if !xnet.IsNetworkOrHostDown(err) &&
|
||||
!errors.Is(err, io.EOF) &&
|
||||
!errors.Is(err, io.ErrUnexpectedEOF) &&
|
||||
!errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// retriable network errors.
|
||||
remain := retryMax - i
|
||||
if remain <= 0 {
|
||||
if remain := retryMax - i; remain <= 0 { // Fail if we exceeded our retry limit.
|
||||
return response, err
|
||||
}
|
||||
|
||||
// If there are more KES instances then skip waiting and
|
||||
// try the next endpoint directly.
|
||||
if i < len(c.endpoints) {
|
||||
continue
|
||||
}
|
||||
<-time.After(LinearJitterBackoff(retryWaitMin, retryWaitMax, i))
|
||||
}
|
||||
}
|
||||
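postRetry now walks the configured KES endpoints round-robin and only backs off once every endpoint has been attempted in the current pass. A self-contained sketch of the pattern; the fixed sleep here stands in for LinearJitterBackoff:

package main

import (
	"errors"
	"fmt"
	"time"
)

// postRoundRobin sketches the retry strategy above: try each endpoint in
// turn, fail fast on non-retryable errors (omitted here), and only sleep
// once every endpoint has been attempted. Standalone, not the MinIO API.
func postRoundRobin(endpoints []string, do func(endpoint string) error) error {
	retryMax := 1 + len(endpoints)
	for i := 0; ; i++ {
		err := do(endpoints[i%len(endpoints)])
		if err == nil {
			return nil
		}
		if remain := retryMax - i; remain <= 0 {
			return err
		}
		if i < len(endpoints) {
			continue // more endpoints to try; skip the backoff
		}
		time.Sleep(100 * time.Millisecond) // stands in for LinearJitterBackoff
	}
}

func main() {
	calls := 0
	err := postRoundRobin([]string{"a", "b"}, func(ep string) error {
		calls++
		if ep == "b" {
			return nil
		}
		return errors.New("down")
	})
	fmt.Println(err, calls) // <nil> 2 - second endpoint answered
}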
@@ -413,24 +447,17 @@ func (c *kesClient) postRetry(url string, body io.ReadSeeker, limit int64) (io.R
|
||||
// file as PEM-encoded certificate and add it to
|
||||
// the CertPool. If a file is not a PEM certificate
|
||||
// it will be ignored.
|
||||
func loadCACertificates(path string) (*x509.CertPool, error) {
|
||||
rootCAs, _ := x509.SystemCertPool()
|
||||
if rootCAs == nil {
|
||||
// In some systems (like Windows) system cert pool is
|
||||
// not supported or no certificates are present on the
|
||||
// system - so we create a new cert pool.
|
||||
rootCAs = x509.NewCertPool()
|
||||
}
|
||||
func loadCACertificates(path string, rootCAs *x509.CertPool) error {
|
||||
if path == "" {
|
||||
return rootCAs, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
stat, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) || os.IsPermission(err) {
|
||||
return rootCAs, nil
|
||||
return nil
|
||||
}
|
||||
return nil, Errorf("crypto: cannot open '%s': %v", path, err)
|
||||
return Errorf("crypto: cannot open '%s': %v", path, err)
|
||||
}
|
||||
|
||||
// If path is a file, parse as PEM-encoded certificate
|
||||
@@ -439,12 +466,12 @@ func loadCACertificates(path string) (*x509.CertPool, error) {
|
||||
if !stat.IsDir() {
|
||||
cert, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if !rootCAs.AppendCertsFromPEM(cert) {
|
||||
return nil, Errorf("crypto: '%s' is not a valid PEM-encoded certificate", path)
|
||||
return Errorf("crypto: '%s' is not a valid PEM-encoded certificate", path)
|
||||
}
|
||||
return rootCAs, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// If path is a directory then try
|
||||
@@ -454,7 +481,7 @@ func loadCACertificates(path string) (*x509.CertPool, error) {
|
||||
// we ignore it.
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
for _, file := range files {
|
||||
cert, err := ioutil.ReadFile(filepath.Join(path, file.Name()))
|
||||
@@ -463,6 +490,6 @@ func loadCACertificates(path string) (*x509.CertPool, error) {
|
||||
}
|
||||
rootCAs.AppendCertsFromPEM(cert) // ignore files which are not PEM certificates
|
||||
}
|
||||
return rootCAs, nil
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
@@ -109,9 +109,9 @@ type masterKeyKMS struct {
|
||||
// KMSInfo contains some describing information about
|
||||
// the KMS.
|
||||
type KMSInfo struct {
|
||||
Endpoint string
|
||||
Name string
|
||||
AuthType string
|
||||
Endpoints []string
|
||||
Name string
|
||||
AuthType string
|
||||
}
|
||||
|
||||
// NewMasterKey returns a basic KMS implementation from a single 256 bit master key.
|
||||
@@ -147,9 +147,9 @@ func (kms *masterKeyKMS) GenerateKey(keyID string, ctx Context) (key [32]byte, s
|
||||
// KMS is configured directly using master key
|
||||
func (kms *masterKeyKMS) Info() (info KMSInfo) {
|
||||
return KMSInfo{
|
||||
Endpoint: "",
|
||||
Name: "",
|
||||
AuthType: "master-key",
|
||||
Endpoints: []string{},
|
||||
Name: "",
|
||||
AuthType: "master-key",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
@@ -38,6 +39,8 @@ func IsMultiPart(metadata map[string]string) bool {
|
||||
func RemoveSensitiveEntries(metadata map[string]string) { // This function is tested in TestRemoveSensitiveHeaders for compatibility reasons
|
||||
delete(metadata, SSECKey)
|
||||
delete(metadata, SSECopyKey)
|
||||
delete(metadata, xhttp.AmzMetaUnencryptedContentLength)
|
||||
delete(metadata, xhttp.AmzMetaUnencryptedContentMD5)
|
||||
}
|
||||
|
||||
// RemoveSSEHeaders removes all crypto-specific SSE
|
||||
@@ -62,6 +65,17 @@ func RemoveInternalEntries(metadata map[string]string) {
|
||||
delete(metadata, S3KMSSealedKey)
|
||||
}
|
||||
|
||||
// IsSourceEncrypted returns true if the source is encrypted
|
||||
func IsSourceEncrypted(metadata map[string]string) bool {
|
||||
if _, ok := metadata[SSECAlgorithm]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := metadata[SSEHeader]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
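IsSourceEncrypted treats an object as encrypted when either the SSE-C algorithm key or the generic SSE header key is present in its metadata. A standalone sketch with stand-in constants; the real values live on the crypto package:

package main

import "fmt"

// Stand-ins for the crypto package constants referenced above.
const (
	ssecAlgorithm = "X-Amz-Server-Side-Encryption-Customer-Algorithm"
	sseHeader     = "X-Amz-Server-Side-Encryption"
)

// isSourceEncrypted mirrors the helper above: the source object is treated
// as encrypted when either the SSE-C or the generic SSE marker is present.
func isSourceEncrypted(metadata map[string]string) bool {
	if _, ok := metadata[ssecAlgorithm]; ok {
		return true
	}
	_, ok := metadata[sseHeader]
	return ok
}

func main() {
	fmt.Println(isSourceEncrypted(map[string]string{sseHeader: "aws:kms"})) // true
	fmt.Println(isSourceEncrypted(map[string]string{}))                    // false
}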
// IsEncrypted returns true if the object metadata indicates
|
||||
// that it was uploaded using some form of server-side-encryption.
|
||||
//
|
||||
@@ -115,7 +129,7 @@ func (ssec) IsEncrypted(metadata map[string]string) bool {
|
||||
// metadata is nil.
|
||||
func CreateMultipartMetadata(metadata map[string]string) map[string]string {
|
||||
if metadata == nil {
|
||||
metadata = map[string]string{}
|
||||
return map[string]string{SSEMultipart: ""}
|
||||
}
|
||||
metadata[SSEMultipart] = ""
|
||||
return metadata
|
||||
@@ -142,7 +156,7 @@ func (s3) CreateMetadata(metadata map[string]string, keyID string, kmsKey []byte
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
metadata = map[string]string{}
|
||||
metadata = make(map[string]string, 5)
|
||||
}
|
||||
|
||||
metadata[SSESealAlgorithm] = sealedKey.Algorithm
|
||||
@@ -222,7 +236,7 @@ func (ssec) CreateMetadata(metadata map[string]string, sealedKey SealedKey) map[
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
metadata = map[string]string{}
|
||||
metadata = make(map[string]string, 3)
|
||||
}
|
||||
metadata[SSESealAlgorithm] = SealAlgorithm
|
||||
metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
|
||||
|
||||
@@ -21,9 +21,8 @@ import (
|
||||
|
||||
// default retry configuration
|
||||
const (
|
||||
retryWaitMin = 500 * time.Millisecond // minimum retry limit.
|
||||
retryWaitMax = 3 * time.Second // 3 secs worth of max retry.
|
||||
retryMax = 2
|
||||
retryWaitMin = 100 * time.Millisecond // minimum retry limit.
|
||||
retryWaitMax = 1500 * time.Millisecond // 1.5 secs worth of max retry.
|
||||
)
|
||||
|
||||
// LinearJitterBackoff provides the time.Duration for a caller to
|
||||
|
||||
@@ -199,13 +199,13 @@ func (v *vaultService) DefaultKeyID() string {
|
||||
}
|
||||
|
||||
// Info returns some information about the Vault,
|
||||
// configuration - like the endpoint or authentication
|
||||
// configuration - like the endpoints or authentication
|
||||
// method.
|
||||
func (v *vaultService) Info() KMSInfo {
|
||||
return KMSInfo{
|
||||
Endpoint: v.config.Endpoint,
|
||||
Name: v.DefaultKeyID(),
|
||||
AuthType: v.config.Auth.Type,
|
||||
Endpoints: []string{v.config.Endpoint},
|
||||
Name: v.DefaultKeyID(),
|
||||
AuthType: v.config.Auth.Type,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
@@ -28,6 +29,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/crawler"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
@@ -35,6 +37,7 @@ import (
|
||||
"github.com/minio/minio/pkg/env"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/willf/bloom"
|
||||
)
|
||||
|
||||
@@ -44,6 +47,14 @@ const (
|
||||
dataCrawlStartDelay = 5 * time.Minute // Time to wait on startup and between cycles.
|
||||
dataUsageUpdateDirCycles = 16 // Visit all folders every n cycles.
|
||||
|
||||
healDeleteDangling = true
|
||||
healFolderIncludeProb = 32 // Include a clean folder one in n cycles.
|
||||
healObjectSelectProb = 512 // Overall probability of a file being scanned; one in n.
|
||||
)
|
||||
|
||||
var (
|
||||
globalCrawlerConfig crawler.Config
|
||||
dataCrawlerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
|
||||
)
|
||||
|
||||
// initDataCrawler will start the crawler unless disabled.
|
||||
@@ -57,6 +68,19 @@ func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
// The function will block until the context is canceled.
|
||||
// There should only ever be one crawler running per cluster.
|
||||
func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
// Make sure only 1 crawler is running on the cluster.
|
||||
locker := objAPI.NewNSLock(ctx, minioMetaBucket, "runDataCrawler.lock")
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for {
|
||||
err := locker.GetLock(dataCrawlerLeaderLockTimeout)
|
||||
if err != nil {
|
||||
time.Sleep(time.Duration(r.Float64() * float64(dataCrawlStartDelay)))
|
||||
continue
|
||||
}
|
||||
break
|
||||
// No unlock for "leader" lock.
|
||||
}
|
||||
|
||||
// Load current bloom cycle
|
||||
nextBloomCycle := intDataUpdateTracker.current() + 1
|
||||
var buf bytes.Buffer
|
||||
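Leader election for the crawler is just a never-released namespace lock: every node retries with a randomized sleep, and whoever wins runs the crawl. A local sketch of the same loop, using sync.Mutex.TryLock (Go 1.18+) in place of the cluster lock:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// acquireLeader mirrors the loop above: keep trying to take the shared lock,
// sleeping a random fraction of the start delay between attempts. The lock is
// never released; holding it makes this process the single cluster crawler.
func acquireLeader(try func() bool, startDelay time.Duration) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for !try() {
		time.Sleep(time.Duration(r.Float64() * float64(startDelay)))
	}
}

func main() {
	var mu sync.Mutex
	acquireLeader(mu.TryLock, 10*time.Millisecond)
	fmt.Println("this process is now the crawler leader")
}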
@@ -87,12 +111,6 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
if err == nil {
|
||||
// Store new cycle...
|
||||
nextBloomCycle++
|
||||
if nextBloomCycle%dataUpdateTrackerResetEvery == 0 {
|
||||
if intDataUpdateTracker.debug {
|
||||
logger.Info(color.Green("runDataCrawler:") + " Resetting bloom filter for next runs.")
|
||||
}
|
||||
nextBloomCycle++
|
||||
}
|
||||
var tmp [8]byte
|
||||
binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
|
||||
r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)), false)
|
||||
@@ -111,8 +129,9 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
|
||||
}
|
||||
|
||||
type cachedFolder struct {
|
||||
name string
|
||||
parent *dataUsageHash
|
||||
name string
|
||||
parent *dataUsageHash
|
||||
objectHealProbDiv uint32
|
||||
}
|
||||
|
||||
type folderScanner struct {
|
||||
@@ -125,6 +144,8 @@ type folderScanner struct {
|
||||
|
||||
dataUsageCrawlMult float64
|
||||
dataUsageCrawlDebug bool
|
||||
healFolderInclude uint32 // Include a clean folder one in n cycles.
|
||||
healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
|
||||
|
||||
newFolders []cachedFolder
|
||||
existingFolders []cachedFolder
|
||||
@@ -167,8 +188,17 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
existingFolders: nil,
|
||||
dataUsageCrawlMult: delayMult,
|
||||
dataUsageCrawlDebug: intDataUpdateTracker.debug,
|
||||
healFolderInclude: 0,
|
||||
healObjectSelect: 0,
|
||||
}
|
||||
|
||||
// Enable healing in XL mode.
|
||||
if globalIsErasure {
|
||||
// Include a clean folder one in n cycles.
|
||||
s.healFolderInclude = healFolderIncludeProb
|
||||
// Do a heal check on an object once every n cycles. Must divide into healFolderInclude
|
||||
s.healObjectSelect = healObjectSelectProb
|
||||
}
|
||||
if len(cache.Info.BloomFilter) > 0 {
|
||||
s.withFilter = &bloomFilter{BloomFilter: &bloom.BloomFilter{}}
|
||||
_, err := s.withFilter.ReadFrom(bytes.NewBuffer(cache.Info.BloomFilter))
|
||||
@@ -189,7 +219,7 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
}
|
||||
|
||||
// Always scan flattenLevels deep. Cache root is level 0.
|
||||
todo := []cachedFolder{{name: cache.Info.Name}}
|
||||
todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}}
|
||||
for i := 0; i < flattenLevels; i++ {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Level %v, scanning %v directories."+logSuffix, i, len(todo))
|
||||
@@ -218,7 +248,7 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
return s.newCache, ctx.Err()
|
||||
default:
|
||||
}
|
||||
du, err := s.deepScanFolder(ctx, folder.name)
|
||||
du, err := s.deepScanFolder(ctx, folder)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
@@ -249,26 +279,38 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
}
|
||||
h := hashPath(folder.name)
|
||||
if !h.mod(s.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) {
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
} else {
|
||||
folder.objectHealProbDiv = s.healFolderInclude
|
||||
}
|
||||
folder.objectHealProbDiv = dataUsageUpdateDirCycles
|
||||
}
|
||||
|
||||
if s.withFilter != nil {
|
||||
_, prefix := path2BucketObjectWithBasePath(basePath, folder.name)
|
||||
if s.oldCache.Info.lifeCycle == nil || !s.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
|
||||
// If folder isn't in filter, skip it completely.
|
||||
if !s.withFilter.containsDir(folder.name) {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Skipping non-updated folder: %v"+logSuffix, folder)
|
||||
if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Skipping non-updated folder: %v"+logSuffix, folder)
|
||||
}
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
} else {
|
||||
if s.dataUsageCrawlDebug {
|
||||
logger.Info(logPrefix+"Adding non-updated folder to heal check: %v"+logSuffix, folder.name)
|
||||
}
|
||||
// Update probability of including objects
|
||||
folder.objectHealProbDiv = s.healFolderInclude
|
||||
}
|
||||
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update on this cycle...
|
||||
du, err := s.deepScanFolder(ctx, folder.name)
|
||||
du, err := s.deepScanFolder(ctx, folder)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
@@ -301,15 +343,16 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
default:
|
||||
}
|
||||
thisHash := hashPath(folder.name)
|
||||
existing := f.oldCache.findChildrenCopy(thisHash)
|
||||
|
||||
// If there are lifecycle rules for the prefix, remove the filter.
|
||||
filter := f.withFilter
|
||||
var activeLifeCycle *lifecycle.Lifecycle
|
||||
if f.oldCache.Info.lifeCycle != nil && filter != nil {
|
||||
if f.oldCache.Info.lifeCycle != nil {
|
||||
_, prefix := path2BucketObjectWithBasePath(f.root, folder.name)
|
||||
if f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" Prefix %q has active rules", prefix)
|
||||
logger.Info(color.Green("folder-scanner:")+" Prefix %q has active rules", prefix)
|
||||
}
|
||||
activeLifeCycle = f.oldCache.Info.lifeCycle
|
||||
filter = nil
|
||||
@@ -318,11 +361,19 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
if _, ok := f.oldCache.Cache[thisHash.Key()]; filter != nil && ok {
|
||||
// If folder isn't in filter and we have data, skip it completely.
|
||||
if folder.name != dataUsageRoot && !filter.containsDir(folder.name) {
|
||||
f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" Skipping non-updated folder: %v", folder.name)
|
||||
if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
|
||||
f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("folder-scanner:")+" Skipping non-updated folder: %v", folder.name)
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("folder-scanner:")+" Adding non-updated folder to heal check: %v", folder.name)
|
||||
}
|
||||
// If probability was already healFolderInclude, keep it.
|
||||
folder.objectHealProbDiv = f.healFolderInclude
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
f.waitForLowActiveIO()
|
||||
@@ -336,14 +387,14 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
|
||||
if bucket == "" {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" no bucket (%s,%s)", f.root, entName)
|
||||
logger.Info(color.Green("folder-scanner:")+" no bucket (%s,%s)", f.root, entName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if isReservedOrInvalidBucket(bucket, false) {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" invalid bucket: %v, entry: %v", bucket, entName)
|
||||
logger.Info(color.Green("folder-scanner:")+" invalid bucket: %v, entry: %v", bucket, entName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -359,7 +410,8 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
_, exists := f.oldCache.Cache[h.Key()]
|
||||
cache.addChildString(entName)
|
||||
|
||||
this := cachedFolder{name: entName, parent: &thisHash}
|
||||
this := cachedFolder{name: entName, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
|
||||
delete(existing, h.Key())
|
||||
cache.addChild(h)
|
||||
if final {
|
||||
if exists {
|
||||
@@ -385,11 +437,12 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
objectName: path.Base(entName),
|
||||
debug: f.dataUsageCrawlDebug,
|
||||
lifeCycle: activeLifeCycle,
|
||||
heal: thisHash.mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv),
|
||||
}
|
||||
size, err := f.getSize(item)
|
||||
|
||||
sleepDuration(time.Since(t), f.dataUsageCrawlMult)
|
||||
if err == errSkipFile || err == errFileNotFound {
|
||||
if err == errSkipFile {
|
||||
return nil
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
@@ -402,19 +455,78 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if f.healObjectSelect == 0 {
|
||||
// If heal object selection is disabled, store the cache and continue.
|
||||
f.newCache.replaceHashed(thisHash, folder.parent, cache)
|
||||
continue
|
||||
}
|
||||
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
bgSeq, found := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
// Whatever remains in 'existing' are folders at this level
|
||||
// that existed in the previous run but weren't found now.
|
||||
//
|
||||
// This may be because of 2 reasons:
|
||||
//
|
||||
// 1) The folder/object was deleted.
|
||||
// 2) We come from another disk and this disk missed the write.
|
||||
//
|
||||
// We therefore perform a heal check.
|
||||
// If that doesn't bring it back we remove the folder and assume it was deleted.
|
||||
// This means that the next run will not look for it.
|
||||
for k := range existing {
|
||||
f.waitForLowActiveIO()
|
||||
bucket, prefix := path2BucketObject(k)
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("folder-scanner:")+" checking disappeared folder: %v/%v", bucket, prefix)
|
||||
}
|
||||
|
||||
err = objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{Recursive: true, Remove: healDeleteDangling},
|
||||
func(bucket, object, versionID string) error {
|
||||
return bgSeq.queueHealTask(healSource{
|
||||
bucket: bucket,
|
||||
object: object,
|
||||
versionID: versionID,
|
||||
}, madmin.HealItemObject)
|
||||
})
|
||||
|
||||
if f.dataUsageCrawlDebug && err != nil {
|
||||
logger.Info(color.Green("healObjects:")+" checking returned value %v", err)
|
||||
}
|
||||
|
||||
// Add unless healing returned an error.
|
||||
if err == nil {
|
||||
this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
|
||||
cache.addChild(hashPath(k))
|
||||
if final {
|
||||
f.existingFolders = append(f.existingFolders, this)
|
||||
} else {
|
||||
nextFolders = append(nextFolders, this)
|
||||
}
|
||||
}
|
||||
}
|
||||
f.newCache.replaceHashed(thisHash, folder.parent, cache)
|
||||
}
|
||||
return nextFolders, nil
|
||||
}
|
||||
|
||||
// deepScanFolder will deep scan a folder and return the size if no error occurs.
|
||||
func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dataUsageEntry, error) {
|
||||
func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder) (*dataUsageEntry, error) {
|
||||
var cache dataUsageEntry
|
||||
|
||||
done := ctx.Done()
|
||||
|
||||
var addDir func(entName string, typ os.FileMode) error
|
||||
var dirStack = []string{f.root, folder}
|
||||
var dirStack = []string{f.root, folder.name}
|
||||
|
||||
addDir = func(entName string, typ os.FileMode) error {
|
||||
select {
|
||||
@@ -445,7 +557,7 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dat
|
||||
if f.oldCache.Info.lifeCycle != nil {
|
||||
if f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
|
||||
if f.dataUsageCrawlDebug {
|
||||
logger.Info(color.Green("data-usage:")+" Prefix %q has active rules", prefix)
|
||||
logger.Info(color.Green("folder-scanner:")+" Prefix %q has active rules", prefix)
|
||||
}
|
||||
activeLifeCycle = f.oldCache.Info.lifeCycle
|
||||
}
|
||||
@@ -460,6 +572,7 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dat
|
||||
objectName: path.Base(entName),
|
||||
debug: f.dataUsageCrawlDebug,
|
||||
lifeCycle: activeLifeCycle,
|
||||
heal: hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv),
|
||||
})
|
||||
|
||||
// Don't sleep for really small amount of time
|
||||
@@ -490,6 +603,7 @@ type crawlItem struct {
|
||||
prefix string // Only the prefix if any, does not have final object name.
|
||||
objectName string // Only the object name without prefixes.
|
||||
lifeCycle *lifecycle.Lifecycle
|
||||
heal bool // Has the object been selected for heal check?
|
||||
debug bool
|
||||
}
|
||||
|
||||
@@ -509,8 +623,9 @@ func (i *crawlItem) transformMetaDir() {
|
||||
|
||||
// actionMeta contains information used to apply actions.
|
||||
type actionMeta struct {
|
||||
oi ObjectInfo
|
||||
numVersions int // The number of versions of this object
|
||||
oi ObjectInfo
|
||||
successorModTime time.Time // The modtime of the successor version
|
||||
numVersions int // The number of versions of this object
|
||||
}
|
||||
|
||||
// applyActions will apply lifecycle checks on to a scanned item.
|
||||
@@ -522,6 +637,20 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
|
||||
if i.debug {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
if i.heal {
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" heal checking: %v/%v v%s", i.bucket, i.objectPath(), meta.oi.VersionID)
|
||||
}
|
||||
res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, madmin.HealOpts{Remove: healDeleteDangling})
|
||||
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
||||
return 0
|
||||
}
|
||||
if err != nil && !errors.Is(err, NotImplemented{}) {
|
||||
logger.LogIf(ctx, err)
|
||||
return 0
|
||||
}
|
||||
size = res.ObjectSize
|
||||
}
|
||||
if i.lifeCycle == nil {
|
||||
return size
|
||||
}
|
||||
@@ -529,13 +658,14 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
|
||||
versionID := meta.oi.VersionID
|
||||
action := i.lifeCycle.ComputeAction(
|
||||
lifecycle.ObjectOpts{
|
||||
Name: i.objectPath(),
|
||||
UserTags: meta.oi.UserTags,
|
||||
ModTime: meta.oi.ModTime,
|
||||
VersionID: meta.oi.VersionID,
|
||||
DeleteMarker: meta.oi.DeleteMarker,
|
||||
IsLatest: meta.oi.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
Name: i.objectPath(),
|
||||
UserTags: meta.oi.UserTags,
|
||||
ModTime: meta.oi.ModTime,
|
||||
VersionID: meta.oi.VersionID,
|
||||
DeleteMarker: meta.oi.DeleteMarker,
|
||||
IsLatest: meta.oi.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
SuccessorModTime: meta.successorModTime,
|
||||
})
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: %q (version-id=%s), Initial scan: %v", i.objectPath(), versionID, action)
|
||||
@@ -572,13 +702,14 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
|
||||
// Recalculate action.
|
||||
action = i.lifeCycle.ComputeAction(
|
||||
lifecycle.ObjectOpts{
|
||||
Name: i.objectPath(),
|
||||
UserTags: obj.UserTags,
|
||||
ModTime: obj.ModTime,
|
||||
VersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
IsLatest: obj.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
Name: i.objectPath(),
|
||||
UserTags: obj.UserTags,
|
||||
ModTime: obj.ModTime,
|
||||
VersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
IsLatest: obj.IsLatest,
|
||||
NumVersions: meta.numVersions,
|
||||
SuccessorModTime: meta.successorModTime,
|
||||
})
|
||||
if i.debug {
|
||||
logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action)
|
||||
@@ -649,9 +780,6 @@ func sleepDuration(d time.Duration, x float64) {
|
||||
func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, meta actionMeta) {
|
||||
if meta.oi.ReplicationStatus == replication.Pending ||
|
||||
meta.oi.ReplicationStatus == replication.Failed {
|
||||
// if heal encounters a pending replication status, either replication
|
||||
// has failed due to a server shutdown, or the crawler and PutObject replication are in contention.
|
||||
healPending := meta.oi.ReplicationStatus == replication.Pending
|
||||
replicateObject(ctx, meta.oi.Bucket, meta.oi.Name, meta.oi.VersionID, o, nil, healPending)
|
||||
globalReplicationState.queueReplicaTask(meta.oi)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,11 +46,8 @@ const (
|
||||
dataUpdateTrackerQueueSize = 10000
|
||||
|
||||
dataUpdateTrackerFilename = dataUsageBucket + SlashSeparator + ".tracker.bin"
|
||||
dataUpdateTrackerVersion = 2
|
||||
dataUpdateTrackerVersion = 3
|
||||
dataUpdateTrackerSaveInterval = 5 * time.Minute
|
||||
|
||||
// Reset bloom filters every n cycle
|
||||
dataUpdateTrackerResetEvery = 1000
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -181,6 +178,8 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
|
||||
}
|
||||
d.load(ctx, drives...)
|
||||
go d.startCollector(ctx)
|
||||
// startSaver will unlock.
|
||||
d.mu.Lock()
|
||||
go d.startSaver(ctx, dataUpdateTrackerSaveInterval, drives)
|
||||
}
|
||||
|
||||
@@ -214,17 +213,17 @@ func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
|
||||
}
|
||||
|
||||
// startSaver will start a saver that will write d to all supplied drives at specific intervals.
|
||||
// 'd' must be write locked when started and will be unlocked.
|
||||
// The saver will save and exit when supplied context is closed.
|
||||
func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Duration, drives []string) {
|
||||
t := time.NewTicker(interval)
|
||||
defer t.Stop()
|
||||
var buf bytes.Buffer
|
||||
d.mu.Lock()
|
||||
saveNow := d.save
|
||||
exited := make(chan struct{})
|
||||
d.saveExited = exited
|
||||
d.mu.Unlock()
|
||||
t := time.NewTicker(interval)
|
||||
defer t.Stop()
|
||||
defer close(exited)
|
||||
var buf bytes.Buffer
|
||||
for {
|
||||
var exit bool
|
||||
select {
|
||||
@@ -237,7 +236,10 @@ func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Durati
|
||||
d.mu.Lock()
|
||||
if !d.dirty {
|
||||
d.mu.Unlock()
|
||||
return
|
||||
if exit {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
d.Saved = UTCNow()
|
||||
err := d.serialize(&buf)
|
||||
@@ -364,7 +366,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
|
||||
return err
|
||||
}
|
||||
switch tmp[0] {
|
||||
case 1:
|
||||
case 1, 2:
|
||||
logger.Info(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
|
||||
return nil
|
||||
case dataUpdateTrackerVersion:
|
||||
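Bumping dataUpdateTrackerVersion to 3 while mapping versions 1 and 2 to a silent no-op means old on-disk trackers are simply discarded and rebuilt. A compact sketch of that versioned-decode shape (simplified payload, stdlib only):

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

const trackerVersion = 3

// loadVersioned sketches the switch above: silently discard deprecated
// on-disk formats (a fresh tracker is rebuilt), decode the current one,
// and reject anything unknown.
func loadVersioned(src io.Reader) (uint64, error) {
	var ver [1]byte
	if _, err := io.ReadFull(src, ver[:]); err != nil {
		return 0, err
	}
	switch ver[0] {
	case 1, 2:
		return 0, nil // deprecated data version: start over
	case trackerVersion:
		var cycle uint64
		err := binary.Read(src, binary.LittleEndian, &cycle)
		return cycle, err
	default:
		return 0, errors.New("dataUpdateTracker: unknown data version")
	}
}

func main() {
	buf := bytes.NewBuffer([]byte{trackerVersion})
	binary.Write(buf, binary.LittleEndian, uint64(42))
	fmt.Println(loadVersioned(buf)) // 42 <nil>
}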
@@ -427,6 +429,8 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
|
||||
}
|
||||
// Ignore what remains on the stream.
|
||||
// Update d:
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.Current = dst.Current
|
||||
d.History = dst.History
|
||||
d.Saved = dst.Saved
|
||||
|
||||
@@ -42,6 +42,18 @@ type testingLogger struct {
|
||||
t testLoggerI
|
||||
}
|
||||
|
||||
func (t *testingLogger) Endpoint() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (t *testingLogger) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (t *testingLogger) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *testingLogger) Send(entry interface{}, errKind string) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
@@ -85,7 +85,12 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) {
|
||||
}
|
||||
|
||||
// mod returns true if the hash mod cycles == cycle.
|
||||
// If cycles is 0 false is always returned.
|
||||
// If cycles is 1 true is always returned (as expected).
|
||||
func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
|
||||
if cycles <= 1 {
|
||||
return cycles == 1
|
||||
}
|
||||
return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
|
||||
}
|
||||
|
||||
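mod gives a deterministic, hash-based way to select roughly one in n items per cycle, which is what the heal probability divisors above feed into. A sketch assuming the github.com/cespare/xxhash/v2 package behind the Sum64String call:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// mod mirrors dataUsageHash.mod above: an item is selected on a given cycle
// when its hash falls on that cycle modulo the total number of cycles.
func mod(h string, cycle, cycles uint32) bool {
	if cycles <= 1 {
		return cycles == 1
	}
	return uint32(xxhash.Sum64String(h))%cycles == cycle%cycles
}

func main() {
	selected := 0
	for i := 0; i < 512; i++ {
		if mod(fmt.Sprintf("bucket/object-%d", i), 0, 16) {
			selected++
		}
	}
	fmt.Println(selected) // roughly 512/16 = 32 objects selected this cycle
}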
@@ -117,6 +122,16 @@ func (d *dataUsageCache) find(path string) *dataUsageEntry {
|
||||
return &due
|
||||
}
|
||||
|
||||
// findChildrenCopy returns a copy of the children of the supplied hash.
|
||||
func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap {
|
||||
ch := d.Cache[h.String()].Children
|
||||
res := make(dataUsageHashMap, len(ch))
|
||||
for k := range ch {
|
||||
res[k] = struct{}{}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns nil if not found.
|
||||
func (d *dataUsageCache) subCache(path string) dataUsageCache {
|
||||
dst := dataUsageCache{Info: dataUsageCacheInfo{
|
||||
@@ -328,7 +343,7 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
|
||||
flat := d.flatten(*e)
|
||||
dst[bucket.Name] = BucketUsageInfo{
|
||||
Size: uint64(flat.Size),
|
||||
ObjectsCount: uint64(flat.Objects),
|
||||
ObjectsCount: flat.Objects,
|
||||
ObjectSizesHistogram: flat.ObjSizes.toMap(),
|
||||
}
|
||||
}
|
||||
@@ -345,7 +360,7 @@ func (d *dataUsageCache) bucketUsageInfo(bucket string) BucketUsageInfo {
|
||||
flat := d.flatten(*e)
|
||||
return BucketUsageInfo{
|
||||
Size: uint64(flat.Size),
|
||||
ObjectsCount: uint64(flat.Objects),
|
||||
ObjectsCount: flat.Objects,
|
||||
ObjectSizesHistogram: flat.ObjSizes.toMap(),
|
||||
}
|
||||
}
|
||||
@@ -413,10 +428,15 @@ func (d *dataUsageCache) merge(other dataUsageCache) {
 		}
 	}
 }

+type objectIO interface {
+	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
+	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
+}
+
 // load the cache content with name from minioMetaBackgroundOpsBucket.
 // Only backend errors are returned as errors.
 // If the object is not found or unable to deserialize d is cleared and nil error is returned.
-func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name string) error {
+func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
 	var buf bytes.Buffer
 	err := store.GetObject(ctx, dataUsageBucket, name, 0, -1, &buf, "", ObjectOptions{})
 	if err != nil {
@@ -435,7 +455,7 @@ func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name strin
 }

 // save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
-func (d *dataUsageCache) save(ctx context.Context, store ObjectLayer, name string) error {
+func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
 	b := d.serialize()
 	size := int64(len(b))
 	r, err := hash.NewReader(bytes.NewReader(b), size, "", "", size, false)

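Narrowing the `store` parameter from the broad `ObjectLayer` to the two-method `objectIO` interface means `load` and `save` can now be unit-tested against a small stub instead of a full object layer. A minimal illustration of the same narrowing idea; the `getter` interface and `memStore` stub below are invented for the example, not part of the codebase:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// getter is deliberately narrow: a consumer that only reads objects
// should not demand a full object-layer implementation.
type getter interface {
	GetObject(ctx context.Context, bucket, object string, w io.Writer) error
}

// memStore is a toy in-memory stub satisfying getter, usable in tests.
type memStore map[string][]byte

func (m memStore) GetObject(_ context.Context, bucket, object string, w io.Writer) error {
	b, ok := m[bucket+"/"+object]
	if !ok {
		return fmt.Errorf("not found")
	}
	_, err := w.Write(b)
	return err
}

// load accepts any getter, so a test never needs a real backend.
func load(ctx context.Context, store getter, bucket, name string) ([]byte, error) {
	var buf bytes.Buffer
	if err := store.GetObject(ctx, bucket, name, &buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	store := memStore{"meta/usage.cache": []byte("payload")}
	b, err := load(context.Background(), store, "meta", "usage.cache")
	fmt.Println(string(b), err)
}
```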
@@ -240,7 +240,6 @@ func TestDataUsageUpdate(t *testing.T) {
 		t.Fatal("got nil result")
 	}
 	if w.flatten {
-		t.Log(e.Children)
 		*e = got.flatten(*e)
 	}
 	if e.Size != int64(w.size) {
@@ -360,6 +359,13 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if got.root() == nil {
+		t.Log("cached folders:")
+		for folder := range got.Cache {
+			t.Log("folder:", folder)
+		}
+		t.Fatal("got nil root.")
+	}

 	// Test dirs
 	var want = []struct {

@@ -46,7 +46,7 @@ const (
 	cacheMetaJSONFile = "cache.json"
 	cacheDataFile     = "part.1"
 	cacheMetaVersion  = "1.0.0"
-	cacheExpiryDays   = time.Duration(90 * time.Hour * 24) // defaults to 90 days
+	cacheExpiryDays   = 90 * time.Hour * 24 // defaults to 90 days
 	// SSECacheEncrypted is the metadata key indicating that the object
 	// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
 	SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"
@@ -157,7 +157,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
 	}
 	cache := diskCache{
 		dir:       dir,
-		triggerGC: make(chan struct{}),
+		triggerGC: make(chan struct{}, 1),
 		stats:     CacheDiskStats{Dir: dir},
 		quotaPct:  quotaPct,
 		after:     config.After,
@@ -174,7 +174,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
 		nsMutex: newNSLock(false),
 	}
 	go cache.purgeWait(ctx)
-	cache.diskUsageHigh() // update if cache usage is already high.
+	cache.diskSpaceAvailable(0) // update if cache usage is already high.
 	cache.NewNSLockFn = func(ctx context.Context, cachePath string) RWLocker {
 		return cache.nsMutex.NewNSLock(ctx, nil, cachePath, "")
 	}
@@ -194,7 +194,7 @@ func (c *diskCache) diskUsageLow() bool {
 		logger.LogIf(ctx, err)
 		return false
 	}
-	usedPercent := (di.Total - di.Free) * 100 / di.Total
+	usedPercent := (di.Used / di.Total) * 100
 	low := int(usedPercent) < gcStopPct
 	atomic.StoreUint64(&c.stats.UsagePercent, usedPercent)
 	if low {
@@ -203,9 +203,9 @@
 	return low
 }

-// Returns if the disk usage reaches high water mark w.r.t the configured cache quota.
-// gc starts if high water mark reached.
-func (c *diskCache) diskUsageHigh() bool {
+// Returns if the disk usage reaches or exceeds configured cache quota when size is added.
+// If current usage without size exceeds high watermark a GC is automatically queued.
+func (c *diskCache) diskSpaceAvailable(size int64) bool {
+	gcTriggerPct := c.quotaPct * c.highWatermark / 100
 	di, err := disk.GetInfo(c.dir)
 	if err != nil {
@@ -214,27 +214,30 @@
 		logger.LogIf(ctx, err)
 		return false
 	}
-	usedPercent := (di.Total - di.Free) * 100 / di.Total
-	high := int(usedPercent) >= gcTriggerPct
-	atomic.StoreUint64(&c.stats.UsagePercent, usedPercent)
-	if high {
-		atomic.StoreInt32(&c.stats.UsageState, 1)
-	}
-	return high
-}
-
-// Returns if size space can be allocated without exceeding
-// max disk usable for caching
-func (c *diskCache) diskAvailable(size int64) bool {
-	di, err := disk.GetInfo(c.dir)
-	if err != nil {
-		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
-		ctx := logger.SetReqInfo(GlobalContext, reqInfo)
-		logger.LogIf(ctx, err)
+	if di.Total == 0 {
+		logger.Info("diskCache: Received 0 total disk size")
 		return false
 	}
-	usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
-	return int(usedPercent) < c.quotaPct
+	usedPercent := float64(di.Used) * 100 / float64(di.Total)
+	if usedPercent >= float64(gcTriggerPct) {
+		atomic.StoreInt32(&c.stats.UsageState, 1)
+		c.queueGC()
+	}
+	atomic.StoreUint64(&c.stats.UsagePercent, uint64(usedPercent))
+
+	// Recalculate percentage with provided size added.
+	usedPercent = float64(di.Used+uint64(size)) * 100 / float64(di.Total)
+
+	return usedPercent < float64(c.quotaPct)
 }

+// queueGC will queue a GC.
+// Calling this function is always non-blocking.
+func (c *diskCache) queueGC() {
+	select {
+	case c.triggerGC <- struct{}{}:
+	default:
+	}
+}
+
 // toClear returns how many bytes should be cleared to reach the low watermark quota.
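To make the new `diskSpaceAvailable` arithmetic concrete: with an illustrative quota of 80% and a high watermark of 90, `gcTriggerPct` is 80*90/100 = 72, so a GC is queued once the disk is 72% used, while a write is refused only when adding `size` would push usage past the 80% quota itself. The values below are invented for the walk-through:

```go
package main

import "fmt"

func main() {
	const (
		quotaPct      = 80 // percent of the disk the cache may use
		highWatermark = 90 // percent of quota at which GC kicks in
		total         = uint64(1000)
		used          = uint64(730)
		size          = uint64(150) // object we would like to cache
	)
	gcTriggerPct := quotaPct * highWatermark / 100 // 72
	usedPct := float64(used) * 100 / float64(total)
	fmt.Printf("GC queued: %v (used %.0f%%, trigger %d%%)\n",
		usedPct >= float64(gcTriggerPct), usedPct, gcTriggerPct)

	// Would caching `size` more bytes exceed the quota?
	afterPct := float64(used+size) * 100 / float64(total)
	fmt.Printf("space available: %v (would be %.0f%%, quota %d%%)\n",
		afterPct < quotaPct, afterPct, quotaPct)
}
```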
@@ -247,7 +250,7 @@ func (c *diskCache) toClear() uint64 {
 		logger.LogIf(ctx, err)
 		return 0
 	}
-	return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark))
+	return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark))
 }

 var (
@@ -417,7 +420,7 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
 func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {

 	cLock := c.NewNSLockFn(ctx, cacheObjPath)
-	if err = cLock.GetRLock(globalObjectTimeout); err != nil {
+	if err = cLock.GetRLock(globalOperationTimeout); err != nil {
 		return
 	}

@@ -499,7 +502,7 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
 func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
 	cachedPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(ctx, cachedPath)
-	if err := cLock.GetLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -658,14 +661,13 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)

 // Caches the object to disk
 func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly bool) error {
-	if c.diskUsageHigh() {
-		c.triggerGC <- struct{}{}
+	if !c.diskSpaceAvailable(size) {
 		io.Copy(ioutil.Discard, data)
 		return errDiskFull
 	}
 	cachePath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(ctx, cachePath)
-	if err := cLock.GetLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -688,16 +690,13 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
 	if rs != nil {
 		return c.putRange(ctx, bucket, object, data, size, rs, opts)
 	}
-	if !c.diskAvailable(size) {
+	if !c.diskSpaceAvailable(size) {
 		return errDiskFull
 	}
 	if err := os.MkdirAll(cachePath, 0777); err != nil {
 		return err
 	}
-	var metadata = make(map[string]string)
-	for k, v := range opts.UserDefined {
-		metadata[k] = v
-	}
+	var metadata = cloneMSS(opts.UserDefined)
 	var reader = data
 	var actualSize = uint64(size)
 	if globalCacheKMS != nil {
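The `cloneMSS` call replaces the open-coded copy loop in this and the next hunk. Judging only by the name and how it is used, it is a clone helper for `map[string]string`; a plausible equivalent is sketched below as an assumption, not the verified source of the helper:

```go
// cloneMSS presumably returns a copy of the given map[string]string,
// so cached metadata cannot alias the caller's map (assumed behavior).
func cloneMSS(m map[string]string) map[string]string {
	out := make(map[string]string, len(m))
	for k, v := range m {
		out[k] = v
	}
	return out
}
```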
@@ -719,7 +718,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read

 	if actualSize != uint64(n) {
 		removeAll(cachePath)
-		return IncompleteBody{}
+		return IncompleteBody{Bucket: bucket, Object: object}
 	}
 	return c.saveMetadata(ctx, bucket, object, metadata, n, nil, "", incHitsOnly)
 }
@@ -730,17 +729,14 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
 	if err != nil {
 		return err
 	}
-	if !c.diskAvailable(rlen) {
+	if !c.diskSpaceAvailable(rlen) {
 		return errDiskFull
 	}
 	cachePath := getCacheSHADir(c.dir, bucket, object)
 	if err := os.MkdirAll(cachePath, 0777); err != nil {
 		return err
 	}
-	var metadata = make(map[string]string)
-	for k, v := range opts.UserDefined {
-		metadata[k] = v
-	}
+	var metadata = cloneMSS(opts.UserDefined)
 	var reader = data
 	var actualSize = uint64(rlen)
 	// objSize is the actual size of object (with encryption overhead if any)
@@ -766,7 +762,7 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
 	}
 	if actualSize != uint64(n) {
 		removeAll(cachePath)
-		return IncompleteBody{}
+		return IncompleteBody{Bucket: bucket, Object: object}
 	}
 	return c.saveMetadata(ctx, bucket, object, metadata, int64(objSize), rs, cacheFile, false)
 }
@@ -871,7 +867,7 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
 func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
 	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(ctx, cacheObjPath)
-	if err := cLock.GetRLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetRLock(globalOperationTimeout); err != nil {
 		return nil, numHits, err
 	}

@@ -935,7 +931,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
 // Deletes the cached object
 func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
 	cLock := c.NewNSLockFn(ctx, cacheObjPath)
-	if err := cLock.GetLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer cLock.Unlock()

@@ -102,7 +102,7 @@ func (c *cacheControl) isStale(modTime time.Time) bool {
 func cacheControlOpts(o ObjectInfo) *cacheControl {
 	c := cacheControl{}
 	m := o.UserDefined
-	if o.Expires != timeSentinel {
+	if !o.Expires.Equal(timeSentinel) {
 		c.expiry = o.Expires
 	}

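The `!= timeSentinel` to `!Equal(timeSentinel)` changes in this and several later hunks fix a well-known Go pitfall: `==`/`!=` on `time.Time` compares the struct representation, including the monotonic clock reading and location, so two values for the same instant can compare unequal, while `Equal` compares the instant itself. A quick demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()
	u := t.UTC() // same instant, different representation

	fmt.Println(t == u)     // may print false: monotonic reading differs
	fmt.Println(t.Equal(u)) // always true: same instant in time
}
```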
@@ -489,9 +489,15 @@ func (f *fileScorer) queueString() string {
 // bytesToClear() returns the number of bytes to clear to reach low watermark
 // w.r.t quota given disk total and free space, quota in % allocated to cache
 // and low watermark % w.r.t allowed quota.
-func bytesToClear(total, free int64, quotaPct, lowWatermark uint64) uint64 {
-	used := (total - free)
+// If the high watermark hasn't been reached 0 will be returned.
+func bytesToClear(total, free int64, quotaPct, lowWatermark, highWatermark uint64) uint64 {
+	used := total - free
 	quotaAllowed := total * (int64)(quotaPct) / 100
-	lowWMUsage := (total * (int64)(lowWatermark*quotaPct) / (100 * 100))
+	highWMUsage := total * (int64)(highWatermark*quotaPct) / (100 * 100)
+	if used < highWMUsage {
+		return 0
+	}
+	// Return bytes needed to reach low watermark.
+	lowWMUsage := total * (int64)(lowWatermark*quotaPct) / (100 * 100)
 	return (uint64)(math.Min(float64(quotaAllowed), math.Max(0.0, float64(used-lowWMUsage))))
 }

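Tracing the patched `bytesToClear` through one of the test cases in the next hunk: total=1000, free=200, so used=800; quotaPct=40 gives quotaAllowed=400; with both watermarks at 90, highWMUsage = lowWMUsage = 1000*90*40/10000 = 360; used is at least 360, so the result is min(400, 800-360) = 400, matching the expected value. The same arithmetic, inlined:

```go
package main

import (
	"fmt"
	"math"
)

// One test case of the patched bytesToClear, worked step by step.
func main() {
	total, free := int64(1000), int64(200)
	quotaPct, lowWM, highWM := int64(40), int64(90), int64(90)

	used := total - free                                    // 800
	quotaAllowed := total * quotaPct / 100                  // 400
	highWMUsage := total * highWM * quotaPct / (100 * 100)  // 360
	if used < highWMUsage {
		fmt.Println("below high watermark, clear 0")
		return
	}
	lowWMUsage := total * lowWM * quotaPct / (100 * 100) // 360
	toClear := uint64(math.Min(float64(quotaAllowed), math.Max(0, float64(used-lowWMUsage))))
	fmt.Println("bytes to clear:", toClear) // 400
}
```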
@@ -45,7 +45,7 @@ func TestGetCacheControlOpts(t *testing.T) {
 		t.Run("", func(t *testing.T) {
 			m := make(map[string]string)
 			m["cache-control"] = testCase.cacheControlHeaderVal
-			if testCase.expiryHeaderVal != timeSentinel {
+			if !testCase.expiryHeaderVal.Equal(timeSentinel) {
 				m["expires"] = testCase.expiryHeaderVal.String()
 			}
 			c := cacheControlOpts(ObjectInfo{UserDefined: m, Expires: testCase.expiryHeaderVal})
@@ -149,22 +149,26 @@ func TestNewFileScorer(t *testing.T) {
 }
 func TestBytesToClear(t *testing.T) {
 	testCases := []struct {
-		total        int64
-		free         int64
-		quotaPct     uint64
-		watermarkLow uint64
-		expected     uint64
+		total         int64
+		free          int64
+		quotaPct      uint64
+		watermarkLow  uint64
+		watermarkHigh uint64
+		expected      uint64
 	}{
-		{1000, 800, 40, 90, 0},
-		{1000, 200, 40, 90, 400},
-		{1000, 400, 40, 90, 240},
-		{1000, 600, 40, 90, 40},
-		{1000, 600, 40, 70, 120},
-		{1000, 1000, 90, 70, 0},
-		{1000, 0, 90, 70, 370},
+		{total: 1000, free: 800, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 0},
+		{total: 1000, free: 200, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 400},
+		{total: 1000, free: 400, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 240},
+		{total: 1000, free: 600, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 40},
+		{total: 1000, free: 600, quotaPct: 40, watermarkLow: 70, watermarkHigh: 70, expected: 120},
+		{total: 1000, free: 1000, quotaPct: 90, watermarkLow: 70, watermarkHigh: 70, expected: 0},
+
+		// High not yet reached..
+		{total: 1000, free: 250, quotaPct: 100, watermarkLow: 50, watermarkHigh: 90, expected: 0},
+		{total: 1000, free: 250, quotaPct: 100, watermarkLow: 50, watermarkHigh: 90, expected: 0},
 	}
 	for i, tc := range testCases {
-		toClear := bytesToClear(tc.total, tc.free, tc.quotaPct, tc.watermarkLow)
+		toClear := bytesToClear(tc.total, tc.free, tc.quotaPct, tc.watermarkLow, tc.watermarkHigh)
 		if tc.expected != toClear {
 			t.Errorf("test %d expected %v, got %v", i, tc.expected, toClear)
 		}

@@ -85,16 +85,15 @@ type cacheObjects struct {
 }

 func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {
-	metadata := make(map[string]string)
-	metadata["etag"] = eTag
+	metadata := map[string]string{"etag": eTag}
 	return dcache.SaveMetadata(ctx, bucket, object, metadata, size, rs, "", true)
 }

 // Backend metadata could have changed through server side copy - reset cache metadata if that is the case
 func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {

-	bkMeta := make(map[string]string)
-	cacheMeta := make(map[string]string)
+	bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
+	cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
 	for k, v := range bkObjectInfo.UserDefined {
 		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
 			// Do not need to send any internal metadata
@@ -166,13 +165,13 @@ func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects

 // construct a metadata k-v map
 func getMetadata(objInfo ObjectInfo) map[string]string {
-	metadata := make(map[string]string)
+	metadata := make(map[string]string, len(objInfo.UserDefined)+4)
 	metadata["etag"] = objInfo.ETag
 	metadata["content-type"] = objInfo.ContentType
 	if objInfo.ContentEncoding != "" {
 		metadata["content-encoding"] = objInfo.ContentEncoding
 	}
-	if objInfo.Expires != timeSentinel {
+	if !objInfo.Expires.Equal(timeSentinel) {
 		metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
 	}
 	for k, v := range objInfo.UserDefined {
@@ -284,12 +283,6 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 	// Reaching here implies cache miss
 	c.cacheStats.incMiss()

-	// Since we got here, we are serving the request from backend,
-	// and also adding the object to the cache.
-	if dcache.diskUsageHigh() {
-		dcache.triggerGC <- struct{}{} // this is non-blocking
-	}
-
 	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)

 	if bkErr != nil {
@@ -306,7 +299,9 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 	if cacheErr == nil {
 		bkReader.ObjInfo.CacheLookupStatus = CacheHit
 	}
-	if !dcache.diskAvailable(objInfo.Size) {
+
+	// Check if we can add it without exceeding total cache size.
+	if !dcache.diskSpaceAvailable(objInfo.Size) {
 		return bkReader, bkErr
 	}

@@ -317,16 +312,18 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 			rs = nil
 		}
 		// fill cache in the background for range GET requests
-		bReader, bErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
+		bReader, bErr := c.GetObjectNInfoFn(GlobalContext, bucket, object, rs, h, lockType, opts)
 		if bErr != nil {
 			return
 		}
 		defer bReader.Close()
-		oi, _, _, err := dcache.statRange(ctx, bucket, object, rs)
+		oi, _, _, err := dcache.statRange(GlobalContext, bucket, object, rs)
 		// avoid cache overwrite if another background routine filled cache
 		if err != nil || oi.ETag != bReader.ObjInfo.ETag {
 			// use a new context to avoid locker prematurely timing out operation when the GetObjectNInfo returns.
-			dcache.Put(context.Background(), bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
+			dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{
+				UserDefined: getMetadata(bReader.ObjInfo),
+			}, false)
 			return
 		}
 	}()
@@ -336,8 +333,13 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
 	teeReader := io.TeeReader(bkReader, pipeWriter)
+	userDefined := getMetadata(bkReader.ObjInfo)
 	go func() {
-		putErr := dcache.Put(ctx, bucket, object, io.LimitReader(pipeReader, bkReader.ObjInfo.Size), bkReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bkReader.ObjInfo)}, false)
+		putErr := dcache.Put(ctx, bucket, object,
+			io.LimitReader(pipeReader, bkReader.ObjInfo.Size),
+			bkReader.ObjInfo.Size, nil, ObjectOptions{
+				UserDefined: userDefined,
+			}, false)
 		// close the write end of the pipe, so the error gets
 		// propagated to getObjReader
 		pipeWriter.CloseWithError(putErr)
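The last hunk keeps the existing pattern for filling the cache while streaming to the client: the backend reader is teed into a pipe, a goroutine copies the pipe's read end into the cache, and closing the pipe writer with the put error propagates failures back to the tee. A stripped-down version of that plumbing, with in-memory buffers standing in for the client and the disk cache (names invented):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	backend := strings.NewReader("object bytes from the backend")

	pr, pw := io.Pipe()
	tee := io.TeeReader(backend, pw) // every client read also feeds the pipe

	var cache bytes.Buffer
	done := make(chan struct{})
	go func() {
		defer close(done)
		_, err := io.Copy(&cache, pr) // stands in for the cache Put
		pw.CloseWithError(err)        // propagate a copy error to the tee, if any
	}()

	var client bytes.Buffer
	io.Copy(&client, tee) // serve the client; the cache fills as a side effect
	pw.Close()            // signal EOF to the cache-filling goroutine
	<-done

	fmt.Println("client got:", client.String())
	fmt.Println("cache got: ", cache.String())
}
```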
@@ -606,9 +608,10 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
 	}

 	// fetch from backend if there is no space on cache drive
-	if !dcache.diskAvailable(size) {
+	if !dcache.diskSpaceAvailable(size) {
 		return putObjectFn(ctx, bucket, object, r, opts)
 	}
+
 	if opts.ServerSideEncryption != nil {
 		dcache.Delete(ctx, bucket, object)
 		return putObjectFn(ctx, bucket, object, r, opts)
@@ -634,15 +637,15 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
 	if err == nil {
 		go func() {
 			// fill cache in the background
-			bReader, bErr := c.GetObjectNInfoFn(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
+			bReader, bErr := c.GetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
 			if bErr != nil {
 				return
 			}
 			defer bReader.Close()
-			oi, _, err := dcache.Stat(ctx, bucket, object)
+			oi, _, err := dcache.Stat(GlobalContext, bucket, object)
 			// avoid cache overwrite if another background routine filled cache
 			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
-				dcache.Put(ctx, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
+				dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
 			}
 		}()
 	}
@@ -715,7 +718,9 @@ func (c *cacheObjects) gc(ctx context.Context) {
 	}
 	for _, dcache := range c.cache {
 		if dcache != nil {
-			dcache.triggerGC <- struct{}{}
+			// Check if there is disk.
+			// Will queue a GC scan if at high watermark.
+			dcache.diskSpaceAvailable(0)
 		}
 	}
 }

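Several hunks in this file swap the request-scoped `ctx` for `GlobalContext` inside the background cache-fill goroutines. The reason: those goroutines outlive the request, and a request context is canceled as soon as the handler returns, which would abort the fill. A minimal illustration of the failure mode, with invented names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func fill(ctx context.Context, label string) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend this is the cache write
		fmt.Println(label, "completed")
	case <-ctx.Done():
		fmt.Println(label, "aborted:", ctx.Err())
	}
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() { defer close(done); fill(reqCtx, "request-scoped") }()
	cancel() // the handler returns; the request context is canceled
	<-done

	// A long-lived context (MinIO's GlobalContext plays this role)
	// lets the background fill finish independently of the request.
	fill(context.Background(), "long-lived")
}
```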
@@ -27,7 +27,7 @@ func TestCacheMetadataObjInfo(t *testing.T) {
 	if objInfo.Size != 0 {
 		t.Fatal("Unexpected object info value for Size", objInfo.Size)
 	}
-	if objInfo.ModTime != timeSentinel {
+	if !objInfo.ModTime.Equal(timeSentinel) {
 		t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
 	}
 	if objInfo.IsDir {

@@ -17,6 +17,7 @@
 package cmd

 import (
+	"math"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -26,7 +27,8 @@ const (
 	dynamicTimeoutIncreaseThresholdPct = 0.33 // Upper threshold for failures in order to increase timeout
 	dynamicTimeoutDecreaseThresholdPct = 0.10 // Lower threshold for failures in order to decrease timeout
 	dynamicTimeoutLogSize              = 16
-	maxDuration                        = time.Duration(1<<63 - 1)
+	maxDuration                        = math.MaxInt64
+	maxDynamicTimeout                  = 24 * time.Hour // Never set timeout bigger than this.
 )

 // timeouts that are dynamically adapted based on actual usage results
@@ -40,6 +42,12 @@ type dynamicTimeout struct {

 // newDynamicTimeout returns a new dynamic timeout initialized with timeout value
 func newDynamicTimeout(timeout, minimum time.Duration) *dynamicTimeout {
+	if timeout <= 0 || minimum <= 0 {
+		panic("newDynamicTimeout: negative or zero timeout")
+	}
+	if minimum > timeout {
+		minimum = timeout
+	}
 	return &dynamicTimeout{timeout: int64(timeout), minimum: int64(minimum)}
 }

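With the added validation, constructing a dynamic timeout with a non-positive value now panics, and a minimum larger than the initial timeout is clamped down to it. A self-contained sketch of that constructor behavior on a simplified struct (the real type carries more state):

```go
package main

import (
	"fmt"
	"time"
)

// dynamicTimeout here is a cut-down stand-in for the real struct.
type dynamicTimeout struct{ timeout, minimum int64 }

// newDynamicTimeout mirrors the validation added in the hunk above.
func newDynamicTimeout(timeout, minimum time.Duration) *dynamicTimeout {
	if timeout <= 0 || minimum <= 0 {
		panic("newDynamicTimeout: negative or zero timeout")
	}
	if minimum > timeout {
		minimum = timeout // a floor above the ceiling makes no sense; clamp it
	}
	return &dynamicTimeout{timeout: int64(timeout), minimum: int64(minimum)}
}

func main() {
	dt := newDynamicTimeout(5*time.Second, 10*time.Second)
	fmt.Println("timeout:", time.Duration(dt.timeout), "minimum:", time.Duration(dt.minimum))
	// prints: timeout: 5s minimum: 5s
}
```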
@@ -61,60 +69,73 @@ func (dt *dynamicTimeout) LogFailure() {

 // logEntry stores a log entry
 func (dt *dynamicTimeout) logEntry(duration time.Duration) {
+	if duration < 0 {
+		return
+	}
 	entries := int(atomic.AddInt64(&dt.entries, 1))
 	index := entries - 1
 	if index < dynamicTimeoutLogSize {
 		dt.mutex.Lock()
 		dt.log[index] = duration
+
+		// We leak entries while we copy
+		if entries == dynamicTimeoutLogSize {
+
+			// Make copy on stack in order to call adjust()
+			logCopy := [dynamicTimeoutLogSize]time.Duration{}
+			copy(logCopy[:], dt.log[:])
+
+			// reset log entries
+			atomic.StoreInt64(&dt.entries, 0)
+			dt.mutex.Unlock()
+
+			dt.adjust(logCopy)
+			return
+		}
 		dt.mutex.Unlock()
 	}
-	if entries == dynamicTimeoutLogSize {
-		dt.mutex.Lock()
-
-		// Make copy on stack in order to call adjust()
-		logCopy := [dynamicTimeoutLogSize]time.Duration{}
-		copy(logCopy[:], dt.log[:])
-
-		// reset log entries
-		atomic.StoreInt64(&dt.entries, 0)
-
-		dt.mutex.Unlock()
-
-		dt.adjust(logCopy)
-	}
 }

 // adjust changes the value of the dynamic timeout based on the
 // previous results
 func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
-
-	failures, average := 0, int64(0)
-	for i := 0; i < len(entries); i++ {
-		if entries[i] == maxDuration {
+	failures, max := 0, time.Duration(0)
+	for _, dur := range entries[:] {
+		if dur == maxDuration {
 			failures++
-		} else {
-			average += int64(entries[i])
+		} else if dur > max {
+			max = dur
 		}
 	}
-	if failures < len(entries) {
-		average /= int64(len(entries) - failures)
-	}

-	timeOutHitPct := float64(failures) / float64(len(entries))
+	failPct := float64(failures) / float64(len(entries))

-	if timeOutHitPct > dynamicTimeoutIncreaseThresholdPct {
+	if failPct > dynamicTimeoutIncreaseThresholdPct {
 		// We are hitting the timeout too often, so increase the timeout by 25%
 		timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
-		atomic.StoreInt64(&dt.timeout, timeout)
-	} else if timeOutHitPct < dynamicTimeoutDecreaseThresholdPct {
-		// We are hitting the timeout relatively few times, so decrease the timeout
-		average = average * 125 / 100 // Add buffer of 25% on top of average

-		timeout := (atomic.LoadInt64(&dt.timeout) + int64(average)) / 2 // Middle between current timeout and average success
+		// Set upper cap.
+		if timeout > int64(maxDynamicTimeout) {
+			timeout = int64(maxDynamicTimeout)
+		}
+		// Safety, shouldn't happen
 		if timeout < dt.minimum {
 			timeout = dt.minimum
 		}
 		atomic.StoreInt64(&dt.timeout, timeout)
+	} else if failPct < dynamicTimeoutDecreaseThresholdPct {
+		// We are hitting the timeout relatively few times,
+		// so decrease the timeout towards 25 % of maximum time spent.
+		max = max * 125 / 100
+
+		timeout := atomic.LoadInt64(&dt.timeout)
+		if max < time.Duration(timeout) {
+			// Move 50% toward the max.
+			timeout = (int64(max) + timeout) / 2
+		}
+		if timeout < dt.minimum {
+			timeout = dt.minimum
+		}
+		atomic.StoreInt64(&dt.timeout, timeout)
 	}
-
 }

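Working the new `adjust` through with numbers: with a 16-entry log, 6 timeouts give failPct = 6/16 = 0.375 > 0.33, so the timeout grows by 25% (capped at 24h, floored at the minimum); with 1 timeout, failPct = 0.0625 < 0.10, so the timeout moves halfway toward 125% of the slowest successful call, when that target is lower. A compressed sketch of that decision logic (thresholds copied from the diff, structure simplified and names changed):

```go
package main

import (
	"fmt"
	"time"
)

const (
	increasePct = 0.33
	decreasePct = 0.10
	maxTimeout  = 24 * time.Hour
)

// adjust condenses the patched logic: raise on frequent timeouts,
// otherwise drift down toward the slowest observed success plus headroom.
func adjust(cur time.Duration, failures, entries int, maxObserved, minimum time.Duration) time.Duration {
	failPct := float64(failures) / float64(entries)
	switch {
	case failPct > increasePct:
		cur = cur * 125 / 100 // hitting the timeout too often: +25%
		if cur > maxTimeout {
			cur = maxTimeout
		}
	case failPct < decreasePct:
		target := maxObserved * 125 / 100 // 25% headroom over the slowest success
		if target < cur {
			cur = (target + cur) / 2 // move halfway toward it
		}
	}
	if cur < minimum {
		cur = minimum
	}
	return cur
}

func main() {
	fmt.Println(adjust(8*time.Second, 6, 16, 2*time.Second, time.Second)) // 10s: raised
	fmt.Println(adjust(8*time.Second, 1, 16, 2*time.Second, time.Second)) // 5.25s: lowered
}
```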
Some files were not shown because too many files have changed in this diff.