Compare commits

192 Commits

RELEASE.20... ... RELEASE.20...

| Author | SHA1 | Date |
|---|---|---|
|  | 6484453fc6 |  |
|  | a0d0645128 |  |
|  | 1738eb24b1 |  |
|  | 253194e491 |  |
|  | 736e58dd68 |  |
|  | 907a171edd |  |
|  | ed6d2a100f |  |
|  | effe131090 |  |
|  | 01498a3e34 |  |
|  | 18063bf25c |  |
|  | 57f0176759 |  |
|  | dbbed6f7f0 |  |
|  | 7fbfdceba3 |  |
|  | 9dda9fb903 |  |
|  | f1418a50f0 |  |
|  | 017954e7ea |  |
|  | 806625cbff |  |
|  | 045e30f2c1 |  |
|  | c6a9a94f94 |  |
|  | 8e7c00f3d4 |  |
|  | d1ed1da8c6 |  |
|  | 23e8390997 |  |
|  | 71403be912 |  |
|  | f28d02b7f2 |  |
|  | e0cb814f3f |  |
|  | 98a08e1644 |  |
|  | 3047121255 |  |
|  | 5a7f92481e |  |
|  | 0d45c38782 |  |
|  | 56d1b227cf |  |
|  | 061fa0635c |  |
|  | 6e138f955e |  |
|  | bea87a5a20 |  |
|  | 2b4eb87d77 |  |
|  | 799758e54f |  |
|  | 1f9abbee4d |  |
|  | fdf0ae9167 |  |
|  | 00eb6f6bc9 |  |
|  | 66174692a2 |  |
|  | 849fcf0127 |  |
|  | 209680e89f |  |
|  | e0c04a2da0 |  |
|  | 27d9bd04e5 |  |
|  | 511424a287 |  |
|  | bebcf4f004 |  |
|  | eafa775952 |  |
|  | 66b4a862e0 |  |
|  | 9603489dd3 |  |
|  | b302c8a5f4 |  |
|  | 4de88e87bb |  |
|  | b880796aef |  |
|  | 37a5d5d7a0 |  |
|  | 3cac262dd1 |  |
|  | e6ab4db6b8 |  |
|  | ca989eb0b3 |  |
|  | d778d034e7 |  |
|  | df08fd1f03 |  |
|  | ac82f416a4 |  |
|  | f7f9517b6a |  |
|  | 90cff10e2b |  |
|  | 81caf35926 |  |
|  | 5726cef3ca |  |
|  | 5fdf47b118 |  |
|  | 8b74a72b21 |  |
|  | eec69d6796 |  |
|  | 0537a21b79 |  |
|  | 4c54ed8748 |  |
|  | a4006e23a0 |  |
|  | b17dc81540 |  |
|  | d73c4f09f3 |  |
|  | 4c81201f95 |  |
|  | cd8d511d3d |  |
|  | 899a2fa1c7 |  |
|  | 17e17da00d |  |
|  | a5da9120f3 |  |
|  | aa12d75d75 |  |
|  | dd4a2d7419 |  |
|  | 6fcbdd5607 |  |
|  | 3831cc9e3b |  |
|  | 230fc0d186 |  |
|  | 7f9498f43f |  |
|  | 1cf322b7d4 |  |
|  | 3168e93730 |  |
|  | 0b1c824618 |  |
|  | c851e022b7 |  |
|  | 6f45e303f5 |  |
|  | 5ad032826a |  |
|  | 84bf4624a4 |  |
|  | dff37aa33d |  |
|  | d12831eb07 |  |
|  | 4a36cd7035 |  |
|  | 00555c747e |  |
|  | 03490c811b |  |
|  | 48d2c03250 |  |
|  | ed78854cea |  |
|  | e60834838f |  |
|  | d616d8a857 |  |
|  | 24cab7f9df |  |
|  | b2536476c9 |  |
|  | 02c1a08a5b |  |
|  | 5c47ce456e |  |
|  | 8ea55f9dba |  |
|  | 80e3dce631 |  |
|  | 80fab03b63 |  |
|  | 730d2dc7be |  |
|  | 0ee9678190 |  |
|  | 34859c6d4b |  |
|  | b1c99e88ac |  |
|  | 0104af6bcc |  |
|  | 224daee391 |  |
|  | 34ea1d2167 |  |
|  | 9d95937018 |  |
|  | 74a7889a3e |  |
|  | fa01e640f5 |  |
|  | f355374962 |  |
|  | bda0fe3150 |  |
|  | b70995dd60 |  |
|  | 4b6264da7d |  |
|  | 48919de301 |  |
|  | eb3ded420e |  |
|  | eb2934f0c1 |  |
|  | b7438fe4e6 |  |
|  | ce6cef6855 |  |
|  | a966ccd17d |  |
|  | 493c714663 |  |
|  | e959c5d71c |  |
|  | 4a2928eb49 |  |
|  | af88772a78 |  |
|  | 1dce6918c2 |  |
|  | 9109148474 |  |
|  | eaaf05a7cc |  |
|  | 52e21bc853 |  |
|  | 16e1a25bc0 |  |
|  | 958661cbb5 |  |
|  | 6019628f7d |  |
|  | 0987069e37 |  |
|  | 6a0372be6c |  |
|  | c13afd56e8 |  |
|  | 96997d2b21 |  |
|  | 86a3319d41 |  |
|  | a694ba93d9 |  |
|  | 9f60e84ce1 |  |
|  | a9aaea0d67 |  |
|  | 572b1721b2 |  |
|  | b0e1d4ce78 |  |
|  | eb19c8af40 |  |
|  | 746f1585eb |  |
|  | 2d58a8d861 |  |
|  | 0037951b6e |  |
|  | fbd1c5f51a |  |
|  | 1c6781757c |  |
|  | b4e3956e69 |  |
|  | c51229493b |  |
|  | 631d55aa22 |  |
|  | 8a291e1dc0 |  |
|  | 650dccfa9e |  |
|  | d08b4b147d |  |
|  | 9a703befe6 |  |
|  | 9a1615768d |  |
|  | 37da0c647e |  |
|  | 2acb530ccd |  |
|  | 3e1fb17b70 |  |
|  | a89d6b8e3d |  |
|  | 1c085f7d1a |  |
|  | 4b6585d249 |  |
|  | 9ffad7fceb |  |
|  | 18725679c4 |  |
|  | ba8a8ad818 |  |
|  | 102ad60dee |  |
|  | cb61e50b51 |  |
|  | 859ef52886 |  |
|  | f04a1f220c |  |
|  | cd380251b3 |  |
|  | 92cd1eed45 |  |
|  | db32a24cb6 |  |
|  | 2d96940826 |  |
|  | e730da1438 |  |
|  | 46ee8659b4 |  |
|  | 73a6b4ea11 |  |
|  | c1b88c17cc |  |
|  | a359e36e35 |  |
|  | 0a2e6d58a5 |  |
|  | 7e80afdd7f |  |
|  | 1b119557c2 |  |
|  | 7778fef6bb |  |
|  | ea1803417f |  |
|  | ea5094e842 |  |
|  | 5a974fb10c |  |
|  | 9acdeab73d |  |
|  | d19b434ffc |  |
|  | 17a1eda702 |  |
|  | 7d50a0cfea |  |
.github/stale.yml (vendored): 1 change

@@ -14,6 +14,7 @@ onlyLabels: []
 exemptLabels:
 - "security"
 - "pending discussion"
+- "do not close"

 # Set to true to ignore issues in a project (defaults to false)
 exemptProjects: false

.github/workflows/codeql.yml (vendored): 51 changes (file removed)

@@ -1,51 +0,0 @@
-name: "Code scanning - action"
-
-on:
-  push:
-  pull_request:
-  schedule:
-    - cron: '0 19 * * 0'
-
-jobs:
-  CodeQL-Build:
-
-    # CodeQL runs on ubuntu-latest and windows-latest
-    runs-on: ubuntu-latest
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v2
-      with:
-        # We must fetch at least the immediate parents so that if this is
-        # a pull request then we can checkout the head.
-        fetch-depth: 2
-
-    # If this run was triggered by a pull request event, then checkout
-    # the head of the pull request instead of the merge commit.
-    - run: git checkout HEAD^2
-      if: ${{ github.event_name == 'pull_request' }}
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
-      with:
-        languages: go, javascript
-
-    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v1
-
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 https://git.io/JvXDl
-
-    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-    #    and modify them (or add more) to build your code if your project
-    #    uses a compiled language
-
-    #- run: |
-    #   make bootstrap
-    #   make release
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1

.github/workflows/go.yml (vendored): 4 changes

@@ -37,10 +37,12 @@ jobs:
         GO111MODULE: on
         MINIO_CI_CD: 1
       run: |
         sudo sysctl net.ipv6.conf.all.disable_ipv6=0
         sudo sysctl net.ipv6.conf.default.disable_ipv6=0
         sudo apt-get install devscripts shellcheck
+        nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
+        curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
-        go list -m all | ./nancy
+        go list -m all | ./nancy sleuth
         make
         diff -au <(gofmt -s -d cmd) <(printf "")
         diff -au <(gofmt -s -d pkg) <(printf "")

@@ -1,5 +0,0 @@
-CVE-2020-13223
-CVE-2020-7220
-CVE-2020-10661
-CVE-2020-10660
-CWE-190

Makefile: 8 changes

@@ -18,11 +18,17 @@ getdeps:
 	@mkdir -p ${GOPATH}/bin
 	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
 	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
+	@which msgp 1>/dev/null || (echo "Installing msgp" && GO111MODULE=off go get github.com/tinylib/msgp)
+	@which stringer 1>/dev/null || (echo "Installing stringer" && GO111MODULE=off go get golang.org/x/tools/cmd/stringer)

 crosscompile:
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)

-verifiers: getdeps fmt lint ruleguard
+verifiers: getdeps fmt lint ruleguard check-gen
+
+check-gen:
+	@go generate ./... >/dev/null
+	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

 fmt:
 	@echo "Running $@ check"
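The new `check-gen` target runs `go generate ./...` and fails the build when any generated `_gen.go` file differs from what is committed. As a rough illustration of what that command picks up (the file below is a hypothetical example, not part of this diff), `go generate` scans sources for `//go:generate` directives and runs the named tools, such as the `msgp` binary installed by `getdeps`:

```go
// Package example shows the kind of directive `go generate ./...` runs.
// Hypothetical sketch: github.com/tinylib/msgp writes MessagePack codecs
// for the types in this file into a sibling *_gen.go file.
package example

//go:generate msgp

// Record is a type whose generated codecs would land in a _gen.go file,
// which is exactly what the check-gen target diffs against git.
type Record struct {
	Name string `msg:"name"`
	Size int64  `msg:"size"`
}
```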
@@ -88,7 +88,7 @@ service minio start
 ```

 ## Install from Source
-Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
+Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.14](https://golang.org/dl/#stable)

 ```sh
 GO111MODULE=on go get github.com/minio/minio
@@ -177,7 +177,7 @@ mc admin update <minio alias, e.g., myminio>
 - `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
 - `mc admin update` is disabled in kubernetes/container environments, container environments provide their own mechanisms to rollout of updates.
 - In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
-- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)x
+- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
 - If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
 - If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
@@ -193,4 +193,4 @@ mc admin update <minio alias, e.g., myminio>
 Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)

 ## License
-Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](./LICENSE).
+Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).
@@ -89,7 +89,7 @@ service minio start

 ## Install from Source

-Source installation is only intended for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.13](https://golang.org/dl/#stable)
+Source installation is only intended for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.14](https://golang.org/dl/#stable)

 ```sh
 GO111MODULE=on go get github.com/minio/minio
@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
     const { fetchServerInfo } = this.props
     fetchServerInfo()
   }
-  fullScreen(e) {
-    e.preventDefault()
-    let el = document.documentElement
-    if (el.requestFullscreen) {
-      el.requestFullscreen()
-    }
-    if (el.mozRequestFullScreen) {
-      el.mozRequestFullScreen()
-    }
-    if (el.webkitRequestFullscreen) {
-      el.webkitRequestFullscreen()
-    }
-    if (el.msRequestFullscreen) {
-      el.msRequestFullscreen()
-    }
-  }
   logout(e) {
     e.preventDefault()
     web.Logout()
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
         <i className="fas fa-bars" />
       </Dropdown.Toggle>
       <Dropdown.Menu className="dropdown-menu-right">
+        <li>
+          <a href="" onClick={this.showChangePassword.bind(this)}>
+            Change Password <i className="fas fa-cog" />
+          </a>
+          {this.state.showChangePasswordModal && (
+            <ChangePasswordModal
+              serverInfo={serverInfo}
+              hideChangePassword={this.hideChangePassword.bind(this)}
+            />
+          )}
+        </li>
+        <li>
+          <a target="_blank" href="https://docs.min.io/?ref=ob">
+            Documentation <i className="fas fa-book" />
+          </a>
+        </li>
         <li>
           <a target="_blank" href="https://github.com/minio/minio">
            GitHub <i className="fab fa-github" />
          </a>
        </li>
-        <li>
-          <a href="" onClick={this.fullScreen}>
-            Fullscreen <i className="fas fa-expand" />
-          </a>
-        </li>
-        <li>
-          <a target="_blank" href="https://docs.min.io/">
-            Documentation <i className="fas fa-book" />
-          </a>
-        </li>
        <li>
-          <a target="_blank" href="https://slack.min.io">
-            Ask for help <i className="fas fa-question-circle" />
+          <a target="_blank" href="https://min.io/pricing?ref=ob">
+            Get Support <i className="fas fa-question-circle" />
          </a>
        </li>
        <li>
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
            />
          )}
        </li>
-        <li>
-          <a href="" onClick={this.showChangePassword.bind(this)}>
-            Change Password <i className="fas fa-cog" />
-          </a>
-          {this.state.showChangePasswordModal && (
-            <ChangePasswordModal
-              serverInfo={serverInfo}
-              hideChangePassword={this.hideChangePassword.bind(this)}
-            />
-          )}
-        </li>
        <li>
          <a href="" id="logout" onClick={this.logout}>
-            Sign Out <i className="fas fa-sign-out-alt" />
+            Logout <i className="fas fa-sign-out-alt" />
          </a>
        </li>
      </Dropdown.Menu>
@@ -15,6 +15,7 @@
 */

 import React from "react"
+import ObjectsSearch from "../objects/ObjectsSearch"
 import Path from "../objects/Path"
 import StorageInfo from "./StorageInfo"
 import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
     <header className="fe-header">
       <Path />
       {loggedIn && <StorageInfo />}
+      {loggedIn && <ObjectsSearch />}
       <ul className="feh-actions">
         {loggedIn ? (
           <BrowserDropdown />
@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
 export const getFilteredBuckets = createSelector(
   bucketsSelector,
   bucketsFilterSelector,
-  (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
+  (buckets, filter) => buckets.filter(
+    bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
 )

 export const getCurrentBucket = state => state.buckets.currentBucket
@@ -18,6 +18,7 @@ import React from "react"
 import { connect } from "react-redux"
 import InfiniteScroll from "react-infinite-scroller"
 import ObjectsList from "./ObjectsList"
+import { getFilteredObjects } from "./selectors"

 export class ObjectsListContainer extends React.Component {
   constructor(props) {
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
       })
     }
   }
+  componentDidUpdate(prevProps) {
+    if (this.props.filter !== prevProps.filter) {
+      this.setState({
+        page: 1
+      })
+    }
+  }
   loadNextPage() {
     this.setState(state => {
       return { page: state.page + 1 }
     })
   }
   render() {
-    const { objects, listLoading } = this.props
+    const { filteredObjects, listLoading } = this.props

-    const visibleObjects = objects.slice(0, this.state.page * 100)
+    const visibleObjects = filteredObjects.slice(0, this.state.page * 100)

     return (
       <div style={{ position: "relative" }}>
         <InfiniteScroll
           pageStart={0}
           loadMore={this.loadNextPage}
-          hasMore={objects.length > visibleObjects.length}
+          hasMore={filteredObjects.length > visibleObjects.length}
           useWindow={true}
           initialLoad={false}
         >
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
   return {
     currentBucket: state.buckets.currentBucket,
     currentPrefix: state.objects.currentPrefix,
-    objects: state.objects.list,
+    filteredObjects: getFilteredObjects(state),
+    filter: state.objects.filter,
     sortBy: state.objects.sortBy,
     sortOrder: state.objects.sortOrder,
     listLoading: state.objects.listLoading
browser/app/js/objects/ObjectsSearch.js: 43 changes (new file)

@@ -0,0 +1,43 @@
+/*
+ * MinIO Cloud Storage (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { connect } from "react-redux"
+import * as actionsObjects from "./actions"
+
+export const ObjectsSearch = ({ onChange }) => (
+  <div
+    className="input-group ig-left ig-search-dark"
+    style={{ display: "block" }}
+  >
+    <input
+      className="ig-text"
+      type="input"
+      placeholder="Search Objects..."
+      onChange={e => onChange(e.target.value)}
+    />
+    <i className="ig-helpers" />
+  </div>
+)
+
+const mapDispatchToProps = dispatch => {
+  return {
+    onChange: filter =>
+      dispatch(actionsObjects.setFilter(filter))
+  }
+}
+
+export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"

 describe("ObjectsList", () => {
   it("should render without crashing", () => {
-    shallow(<ObjectsListContainer objects={[]} />)
+    shallow(<ObjectsListContainer filteredObjects={[]} />)
   })

   it("should render ObjectsList with objects", () => {
     const wrapper = shallow(
       <ObjectsListContainer
-        objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
+        filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
       />
     )
     expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
     const wrapper = shallow(
       <ObjectsListContainer
         currentBucket="test1"
-        objects={[]}
+        filteredObjects={[]}
         listLoading={true}
       />
     )
browser/app/js/objects/__tests__/ObjectsSearch.test.js: 32 changes (new file)

@@ -0,0 +1,32 @@
+/*
+ * MinIO Cloud Storage (C) 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from "react"
+import { shallow } from "enzyme"
+import { ObjectsSearch } from "../ObjectsSearch"
+
+describe("ObjectsSearch", () => {
+  it("should render without crashing", () => {
+    shallow(<ObjectsSearch />)
+  })
+
+  it("should call onChange with search text", () => {
+    const onChange = jest.fn()
+    const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
+    wrapper.find("input").simulate("change", { target: { value: "test" } })
+    expect(onChange).toHaveBeenCalledWith("test")
+  })
+})
@@ -23,6 +23,7 @@ describe("objects reducer", () => {
     const initialState = reducer(undefined, {})
     expect(initialState).toEqual({
       list: [],
+      filter: "",
       listLoading: false,
       sortBy: "",
       sortOrder: SORT_ORDER_ASC,
@@ -36,6 +36,7 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

 export const SET_LIST = "objects/SET_LIST"
 export const RESET_LIST = "objects/RESET_LIST"
+export const SET_FILTER = "objects/SET_FILTER"
 export const APPEND_LIST = "objects/APPEND_LIST"
 export const REMOVE = "objects/REMOVE"
 export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -57,6 +58,13 @@ export const resetList = () => ({
   type: RESET_LIST,
 })

+export const setFilter = filter => {
+  return {
+    type: SET_FILTER,
+    filter
+  }
+}
+
 export const setListLoading = (listLoading) => ({
   type: SET_LIST_LOADING,
   listLoading,
@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
 export default (
   state = {
     list: [],
+    filter: "",
     listLoading: false,
     sortBy: "",
     sortOrder: SORT_ORDER_ASC,
@@ -53,6 +54,11 @@ export default (
         ...state,
         list: []
       }
+    case actionsObjects.SET_FILTER:
+      return {
+        ...state,
+        filter: action.filter
+      }
     case actionsObjects.SET_LIST_LOADING:
       return {
         ...state,
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
 export const getCheckedList = state => state.objects.checkedList

 export const getPrefixWritable = state => state.objects.prefixWritable
+
+const objectsSelector = state => state.objects.list
+const objectsFilterSelector = state => state.objects.filter
+
+export const getFilteredObjects = createSelector(
+  objectsSelector,
+  objectsFilterSelector,
+  (objects, filter) => objects.filter(
+    object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
+)
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
|
||||
export const getCheckedList = state => state.objects.checkedList
|
||||
|
||||
export const getPrefixWritable = state => state.objects.prefixWritable
|
||||
|
||||
const objectsSelector = state => state.objects.list
|
||||
const objectsFilterSelector = state => state.objects.filter
|
||||
|
||||
export const getFilteredObjects = createSelector(
|
||||
objectsSelector,
|
||||
objectsFilterSelector,
|
||||
(objects, filter) => objects.filter(
|
||||
object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
|
||||
)
|
||||
@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
     // Overwrite the default styling from react-dropzone; otherwise it
     // won't handle child elements correctly.
     const style = {
-      height: "100%",
+      flex: "1",
       borderWidth: "0",
       borderStyle: "dashed",
       borderColor: "#fff"
@@ -20,7 +20,8 @@
 @media(max-width: @screen-sm-max) {
   padding: 75px 0 80px;
 }

 display: flex;
 flex-direction: column;
 min-height:100vh;
 overflow: auto;
 }
@@ -169,6 +169,24 @@ select.form-control {
   }
 }

+.ig-search-dark {
+  &:before {
+    font-family: @font-family-icon;
+    font-weight: 900;
+    content: '\f002';
+    font-size: 15px;
+    position: absolute;
+    left: 2px;
+    top: 8px;
+    color: rgba(0, 0, 0, 0.5);
+  }
+
+  .ig-text {
+    padding-left: 25px;
+    .placeholder(rgba(0, 0, 0, 0.5))
+  }
+}
+
 .ig-search {
   &:before {
     font-family: @font-family-icon;
@@ -270,4 +288,4 @@ select.form-control {
 .set-expire-decrease {
   bottom: -27px;
   .rotate(-180deg);
 }
 }
File diff suppressed because one or more lines are too long
@@ -45,88 +45,63 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
 function start_minio_fs()
 {
     "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
-    minio_pid=$!
     sleep 10
-
-    echo "$minio_pid"
 }

 function start_minio_erasure()
 {
     "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
-    minio_pid=$!
     sleep 15
-
-    echo "$minio_pid"
 }

 function start_minio_erasure_sets()
 {
     "${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
-    minio_pid=$!
     sleep 15
-
-    echo "$minio_pid"
 }

 function start_minio_zone_erasure_sets()
 {
-    declare -a minio_pids
     export MINIO_ACCESS_KEY=$ACCESS_KEY
     export MINIO_SECRET_KEY=$SECRET_KEY

     "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
-    minio_pids[0]=$!

     "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
-    minio_pids[1]=$!

     sleep 40
-    echo "${minio_pids[@]}"
 }

 function start_minio_zone_erasure_sets_ipv6()
 {
-    declare -a minio_pids
     export MINIO_ACCESS_KEY=$ACCESS_KEY
     export MINIO_SECRET_KEY=$SECRET_KEY

-    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
-    minio_pids[0]=$!
-
-    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
-    minio_pids[1]=$!
+    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-ipv6-9000.log" 2>&1 &
+    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-ipv6-9001.log" 2>&1 &

     sleep 40
-    echo "${minio_pids[@]}"
 }

 function start_minio_dist_erasure()
 {
-    declare -a minio_pids
     export MINIO_ACCESS_KEY=$ACCESS_KEY
     export MINIO_SECRET_KEY=$SECRET_KEY
     "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
-    minio_pids[0]=$!
     "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
-    minio_pids[1]=$!
     "${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
-    minio_pids[2]=$!
     "${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
-    minio_pids[3]=$!

     sleep 40
-    echo "${minio_pids[@]}"
 }

 function run_test_fs()
 {
-    minio_pid="$(start_minio_fs)"
+    start_minio_fs

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    kill "$minio_pid"
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -138,12 +113,12 @@ function run_test_fs()
 }

 function run_test_erasure_sets() {
-    minio_pid="$(start_minio_erasure_sets)"
+    start_minio_erasure_sets

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    kill "$minio_pid"
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -156,14 +131,12 @@ function run_test_erasure_sets() {

 function run_test_zone_erasure_sets()
 {
-    minio_pids=( $(start_minio_zone_erasure_sets) )
+    start_minio_zone_erasure_sets

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -182,16 +155,14 @@ function run_test_zone_erasure_sets()

 function run_test_zone_erasure_sets_ipv6()
 {
-    minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
+    start_minio_zone_erasure_sets_ipv6

     export SERVER_ENDPOINT="[::1]:9000"

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -210,12 +181,12 @@ function run_test_zone_erasure_sets_ipv6()

 function run_test_erasure()
 {
-    minio_pid="$(start_minio_erasure)"
+    start_minio_erasure

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    kill "$minio_pid"
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -228,14 +199,12 @@ function run_test_erasure()

 function run_test_dist_erasure()
 {
-    minio_pids=( $(start_minio_dist_erasure) )
+    start_minio_dist_erasure

     (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
     rv=$?

-    for pid in "${minio_pids[@]}"; do
-        kill "$pid"
-    done
+    pkill minio
     sleep 3

     if [ "$rv" -ne 0 ]; then
@@ -226,6 +226,11 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
 		return
 	}
 	if bucket != "" {
+		// Check if bucket exists.
+		if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+			return
+		}
 		if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
 			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 			return
@@ -42,7 +42,7 @@ import (

 func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return auth.Credentials{}, nil
@@ -35,7 +35,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
 	var adminAPIErr APIErrorCode

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return nil, cred
@@ -386,7 +386,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 	defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -465,7 +465,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
 	defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -520,7 +520,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
 	defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -579,7 +579,7 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
 	defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))

 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -722,7 +722,10 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
 		return
 	}

-	json.NewEncoder(w).Encode(policy)
+	if err = json.NewEncoder(w).Encode(policy); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
 	w.(http.Flusher).Flush()
 }
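The pattern above, reporting a failure from `json.NewEncoder(w).Encode` instead of silently dropping it, is easy to reproduce in isolation. A minimal sketch under the assumption of a plain `net/http` handler (the handler and payload below are illustrative, not code from this diff):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

type policy struct {
	Name string `json:"name"`
}

// infoHandler encodes a payload and surfaces encode errors instead of
// ignoring them, mirroring the InfoCannedPolicy change above.
func infoHandler(w http.ResponseWriter, r *http.Request) {
	p := policy{Name: "readonly"}
	if err := json.NewEncoder(w).Encode(p); err != nil {
		// By the time Encode fails, part of the body may already be
		// written, so an error response here is best-effort.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}

func main() {
	http.HandleFunc("/info", infoHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```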
@@ -37,7 +37,6 @@ import (
 	"github.com/gorilla/mux"

 	"github.com/minio/minio/cmd/config"
-	"github.com/minio/minio/cmd/config/notify"
 	"github.com/minio/minio/cmd/crypto"
 	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
@@ -54,16 +53,13 @@ const (
 	maxEConfigJSONSize = 262272
 )

-// Type-safe query params.
-type mgmtQueryKey string
-
 // Only valid query params for mgmt admin APIs.
 const (
-	mgmtBucket      mgmtQueryKey = "bucket"
-	mgmtPrefix                   = "prefix"
-	mgmtClientToken              = "clientToken"
-	mgmtForceStart               = "forceStart"
-	mgmtForceStop                = "forceStop"
+	mgmtBucket      = "bucket"
+	mgmtPrefix      = "prefix"
+	mgmtClientToken = "clientToken"
+	mgmtForceStart  = "forceStart"
+	mgmtForceStop   = "forceStop"
 )

 func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (us madmin.ServerUpdateStatus, err error) {
@@ -298,6 +294,20 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
 	// ignores any errors here.
 	storageInfo, _ := objectAPI.StorageInfo(ctx, false)

+	// Collect any disk healing.
+	healing, _ := getAggregatedBackgroundHealState(ctx)
+	healDisks := make(map[string]struct{}, len(healing.HealDisks))
+	for _, disk := range healing.HealDisks {
+		healDisks[disk] = struct{}{}
+	}
+
+	// find all disks which belong to each respective endpoints
+	for i, disk := range storageInfo.Disks {
+		if _, ok := healDisks[disk.Endpoint]; ok {
+			storageInfo.Disks[i].Healing = true
+		}
+	}
+
 	// Marshal API response
 	jsonBytes, err := json.Marshal(storageInfo)
 	if err != nil {
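Building a `map[string]struct{}` from the healing list, as the hunk above does, makes each per-disk lookup O(1) instead of scanning a slice for every disk. A standalone sketch of the same idiom (the endpoint strings are illustrative):

```go
package main

import "fmt"

func main() {
	healingEndpoints := []string{"http://node2/disk1", "http://node3/disk4"}

	// An empty struct{} value occupies no space; the map is used purely
	// as a set for membership tests.
	healSet := make(map[string]struct{}, len(healingEndpoints))
	for _, ep := range healingEndpoints {
		healSet[ep] = struct{}{}
	}

	disks := []string{"http://node1/disk1", "http://node2/disk1", "http://node3/disk4"}
	for _, d := range disks {
		if _, ok := healSet[d]; ok {
			fmt.Println(d, "is healing")
		}
	}
}
```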
@@ -339,23 +349,26 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re
 	writeSuccessResponseJSON(w, dataUsageInfoJSON)
 }

-func lriToLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry {
+func lriToLockEntry(l lockRequesterInfo, resource, server string, rquorum, wquorum int) *madmin.LockEntry {
 	entry := &madmin.LockEntry{
 		Timestamp:  l.Timestamp,
 		Resource:   resource,
 		ServerList: []string{server},
 		Source:     l.Source,
 		Owner:      l.Owner,
 		ID:         l.UID,
 	}
 	if l.Writer {
 		entry.Type = "WRITE"
+		entry.Quorum = wquorum
 	} else {
 		entry.Type = "READ"
+		entry.Quorum = rquorum
 	}
 	return entry
 }

-func topLockEntries(peerLocks []*PeerLocks, count int) madmin.LockEntries {
+func topLockEntries(peerLocks []*PeerLocks, rquorum, wquorum int, stale bool) madmin.LockEntries {
 	entryMap := make(map[string]*madmin.LockEntry)
 	for _, peerLock := range peerLocks {
 		if peerLock == nil {
@@ -367,20 +380,23 @@ func topLockEntries(peerLocks []*PeerLocks, rquorum, wquorum int, stale bool) ma
 			if val, ok := entryMap[lockReqInfo.UID]; ok {
 				val.ServerList = append(val.ServerList, peerLock.Addr)
 			} else {
-				entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
+				entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr, rquorum, wquorum)
 			}
 		}
 	}
 	}
-	var lockEntries = make(madmin.LockEntries, 0, len(entryMap))
+	var lockEntries madmin.LockEntries
 	for _, v := range entryMap {
-		lockEntries = append(lockEntries, *v)
+		if stale {
+			lockEntries = append(lockEntries, *v)
+			continue
+		}
+		if len(v.ServerList) >= v.Quorum {
+			lockEntries = append(lockEntries, *v)
+		}
 	}
 	sort.Sort(lockEntries)
-	if len(lockEntries) > count {
-		lockEntries = lockEntries[:count]
-	}
 	return lockEntries
 }
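The rewritten `topLockEntries` only reports a lock as held once at least a quorum of servers confirm it, unless the caller explicitly asks for stale entries. A reduced sketch of that filtering rule, with simplified stand-ins for the madmin types:

```go
package main

import "fmt"

// lockEntry is a cut-down stand-in for madmin.LockEntry.
type lockEntry struct {
	ID         string
	ServerList []string
	Quorum     int
}

// filterLocks keeps entries confirmed by >= Quorum servers; with
// stale=true everything is returned, matching the handler's ?stale=true.
func filterLocks(entries []lockEntry, stale bool) []lockEntry {
	var out []lockEntry
	for _, e := range entries {
		if stale || len(e.ServerList) >= e.Quorum {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	entries := []lockEntry{
		{ID: "a", ServerList: []string{"s1", "s2", "s3"}, Quorum: 3},
		{ID: "b", ServerList: []string{"s1"}, Quorum: 3}, // likely stale
	}
	fmt.Println(len(filterLocks(entries, false))) // 1
	fmt.Println(len(filterLocks(entries, true)))  // 2
}
```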
@@ -410,23 +426,20 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
 			return
 		}
 	}
+	stale := r.URL.Query().Get("stale") == "true" // list also stale locks

-	peerLocks := globalNotificationSys.GetLocks(ctx)
-	// Once we have received all the locks currently used from peers
-	// add the local peer locks list as well.
-	var getRespLocks GetLocksResp
-	for _, llocker := range globalLockServers {
-		getRespLocks = append(getRespLocks, llocker.DupLockMap())
-	}
-
-	peerLocks = append(peerLocks, &PeerLocks{
-		Addr:  getHostName(r),
-		Locks: getRespLocks,
-	})
-
-	topLocks := topLockEntries(peerLocks, count)
+	peerLocks := globalNotificationSys.GetLocks(ctx, r)
+
+	rquorum := getReadQuorum(objectAPI.SetDriveCount())
+	wquorum := getWriteQuorum(objectAPI.SetDriveCount())
+
+	topLocks := topLockEntries(peerLocks, rquorum, wquorum, stale)
+
+	// Marshal API response upto requested count.
+	if len(topLocks) > count && count > 0 {
+		topLocks = topLocks[:count]
+	}

-	// Marshal API response
 	jsonBytes, err := json.Marshal(topLocks)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -572,8 +585,8 @@ type healInitParams struct {

 // extractHealInitParams - Validates params for heal init API.
 func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
-	hip.bucket = vars[string(mgmtBucket)]
-	hip.objPrefix = vars[string(mgmtPrefix)]
+	hip.bucket = vars[mgmtBucket]
+	hip.objPrefix = vars[mgmtPrefix]

 	if hip.bucket == "" {
 		if hip.objPrefix != "" {
@@ -592,13 +605,13 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
 		return
 	}

-	if len(qParms[string(mgmtClientToken)]) > 0 {
-		hip.clientToken = qParms[string(mgmtClientToken)][0]
+	if len(qParms[mgmtClientToken]) > 0 {
+		hip.clientToken = qParms[mgmtClientToken][0]
 	}
-	if _, ok := qParms[string(mgmtForceStart)]; ok {
+	if _, ok := qParms[mgmtForceStart]; ok {
 		hip.forceStart = true
 	}
-	if _, ok := qParms[string(mgmtForceStop)]; ok {
+	if _, ok := qParms[mgmtForceStop]; ok {
 		hip.forceStop = true
 	}
@@ -799,14 +812,12 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
 	keepConnLive(w, r, respCh)
 }

-func getAggregatedBackgroundHealState(ctx context.Context, failOnErr bool) (madmin.BgHealState, error) {
+func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
 	var bgHealStates []madmin.BgHealState

 	localHealState, ok := getLocalBackgroundHealStatus()
 	if !ok {
-		if failOnErr {
-			return madmin.BgHealState{}, errServerNotInitialized
-		}
+		return madmin.BgHealState{}, errServerNotInitialized
 	}

 	// Get local heal status first
@@ -815,14 +826,16 @@ func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState,
 	if globalIsDistErasure {
 		// Get heal status from other peers
 		peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
+		var errCount int
 		for _, nerr := range nerrs {
 			if nerr.Err != nil {
-				if failOnErr {
-					return madmin.BgHealState{}, nerr.Err
-				}
 				logger.LogIf(ctx, nerr.Err)
+				errCount++
 			}
 		}
+		if errCount == len(nerrs) {
+			return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
+		}
 		bgHealStates = append(bgHealStates, peersHealStates...)
 	}
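The reworked aggregation only fails outright when every peer fails to report, rather than aborting on the first error. That "log individual failures, fail only if all failed" pattern is easy to isolate; in this sketch the fetch functions are hypothetical stand-ins for the peer calls:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// aggregate collects per-peer results and returns an error only when
// every peer failed, mirroring the BackgroundHealStatus change.
func aggregate(fetch []func() (int, error)) ([]int, error) {
	var results []int
	var errCount int
	for _, f := range fetch {
		v, err := f()
		if err != nil {
			log.Println("peer error:", err) // log and keep going
			errCount++
			continue
		}
		results = append(results, v)
	}
	if errCount == len(fetch) {
		return nil, errors.New("all remote servers failed to report")
	}
	return results, nil
}

func main() {
	ok := func() (int, error) { return 1, nil }
	bad := func() (int, error) { return 0, errors.New("unreachable") }
	res, err := aggregate([]func() (int, error){ok, bad})
	fmt.Println(res, err) // [1] <nil>
}
```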
@@ -868,7 +881,12 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
 		return
 	}

-	aggregateHealStateResult, _ := getAggregatedBackgroundHealState(r.Context(), false)
+	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}

 	if err := json.NewEncoder(w).Encode(aggregateHealStateResult); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
@@ -881,7 +899,7 @@ func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Reques
 	var cred auth.Credentials
 	var adminAPIErr APIErrorCode
 	// Get current object layer instance.
-	objectAPI := newObjectLayerWithoutSafeModeFn()
+	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return nil, cred
@@ -1085,7 +1103,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
 	// Avoid reusing tcp connection if read timeout is hit
 	// This is needed to make r.Context().Done() work as
 	// expected in case of read timeout
-	w.Header().Add("Connection", "close")
+	w.Header().Set("Connection", "close")

 	setEventStreamHeaders(w)
@@ -1236,7 +1254,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	vars := mux.Vars(r)
+	query := r.URL.Query()
 	obdInfo := madmin.OBDInfo{}
 	obdInfoCh := make(chan madmin.OBDInfo)

@@ -1282,7 +1300,13 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 	go func() {
 		defer close(obdInfoCh)

-		if cpu, ok := vars["syscpu"]; ok && cpu == "true" {
+		if log := query.Get("log"); log == "true" {
+			obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, getLocalLogOBD(deadlinedCtx, r))
+			obdInfo.Logging.ServersLog = append(obdInfo.Logging.ServersLog, globalNotificationSys.LogOBDInfo(deadlinedCtx)...)
+			partialWrite(obdInfo)
+		}
+
+		if cpu := query.Get("syscpu"); cpu == "true" {
 			cpuInfo := getLocalCPUOBDInfo(deadlinedCtx, r)

 			obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
@@ -1290,7 +1314,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if diskHw, ok := vars["sysdiskhw"]; ok && diskHw == "true" {
+		if diskHw := query.Get("sysdiskhw"); diskHw == "true" {
 			diskHwInfo := getLocalDiskHwOBD(deadlinedCtx, r)

 			obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
@@ -1298,7 +1322,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if osInfo, ok := vars["sysosinfo"]; ok && osInfo == "true" {
+		if osInfo := query.Get("sysosinfo"); osInfo == "true" {
 			osInfo := getLocalOsInfoOBD(deadlinedCtx, r)

 			obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
@@ -1306,7 +1330,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if mem, ok := vars["sysmem"]; ok && mem == "true" {
+		if mem := query.Get("sysmem"); mem == "true" {
 			memInfo := getLocalMemOBD(deadlinedCtx, r)

 			obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
@@ -1314,7 +1338,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if proc, ok := vars["sysprocess"]; ok && proc == "true" {
+		if proc := query.Get("sysprocess"); proc == "true" {
 			procInfo := getLocalProcOBD(deadlinedCtx, r)

 			obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
@@ -1322,14 +1346,14 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if config, ok := vars["minioconfig"]; ok && config == "true" {
+		if config := query.Get("minioconfig"); config == "true" {
 			cfg, err := readServerConfig(ctx, objectAPI)
 			logger.LogIf(ctx, err)
 			obdInfo.Minio.Config = cfg
 			partialWrite(obdInfo)
 		}

-		if drive, ok := vars["perfdrive"]; ok && drive == "true" {
+		if drive := query.Get("perfdrive"); drive == "true" {
 			// Get drive obd details from local server's drive(s)
 			driveOBDSerial := getLocalDrivesOBD(deadlinedCtx, false, globalEndpoints, r)
 			driveOBDParallel := getLocalDrivesOBD(deadlinedCtx, true, globalEndpoints, r)
@@ -1360,7 +1384,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			partialWrite(obdInfo)
 		}

-		if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure {
+		if net := query.Get("perfnet"); net == "true" && globalIsDistErasure {
 			obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
 			partialWrite(obdInfo)
@@ -1374,6 +1398,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 			obdInfo.Perf.NetParallel = globalNotificationSys.NetOBDParallelInfo(deadlinedCtx)
 			partialWrite(obdInfo)
 		}

 	}()

 	ticker := time.NewTicker(30 * time.Second)
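The OBD handler's switch from `mux.Vars(r)` to `r.URL.Query()` reflects that these toggles arrive as query-string parameters rather than path variables, and `Get` returns `""` for absent keys, so a single comparison replaces the old two-step `vars[k]; ok && v == "true"`. A minimal sketch of the pattern (handler name and parameters are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// obdHandler reads feature toggles from the query string, e.g.
// GET /obd?syscpu=true&sysmem=true
func obdHandler(w http.ResponseWriter, r *http.Request) {
	query := r.URL.Query()
	if cpu := query.Get("syscpu"); cpu == "true" {
		fmt.Fprintln(w, "collecting CPU info")
	}
	if mem := query.Get("sysmem"); mem == "true" {
		fmt.Fprintln(w, "collecting memory info")
	}
}

func main() {
	http.HandleFunc("/obd", obdHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```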
@@ -1413,12 +1438,6 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}

-	cfg, err := readServerConfig(ctx, objectAPI)
-	if err != nil {
-		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-		return
-	}
-
 	buckets := madmin.Buckets{}
 	objects := madmin.Objects{}
 	usage := madmin.Usage{}
@@ -1430,7 +1449,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 		usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
 	}

-	vault := fetchVaultStatus(cfg)
+	vault := fetchVaultStatus()

 	ldap := madmin.LDAP{}
 	if globalLDAPConfig.Enabled {
@@ -1446,10 +1465,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 		}
 	}

-	log, audit := fetchLoggerInfo(cfg)
+	log, audit := fetchLoggerInfo()

 	// Get the notification target info
-	notifyTarget := fetchLambdaInfo(cfg)
+	notifyTarget := fetchLambdaInfo()

 	// Fetching the Storage information, ignore any errors.
 	storageInfo, _ := objectAPI.StorageInfo(ctx, false)
@@ -1471,11 +1490,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 		}
 	}

-	mode := "safemode"
-	if newObjectLayerFn() != nil {
-		mode = "online"
-	}
-
+	mode := "online"
 	server := getLocalServerProperty(globalEndpoints, r)
 	servers := globalNotificationSys.ServerInfo()
 	servers = append(servers, server)
@@ -1497,8 +1512,12 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 			}
 		}
 	}

+	// add all the disks local to this server.
 	for _, disk := range storageInfo.Disks {
+		if disk.DrivePath == "" && disk.Endpoint == "" {
+			continue
+		}
 		if disk.Endpoint == disk.DrivePath {
 			servers[len(servers)-1].Disks = append(servers[len(servers)-1].Disks, disk)
 		}
@@ -1525,27 +1544,33 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}

-	//Reply with storage information (across nodes in a
+	// Reply with storage information (across nodes in a
 	// distributed setup) as json.
 	writeSuccessResponseJSON(w, jsonBytes)
 }

-func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
-
-	// Fetch the configured targets
-	tr := NewGatewayHTTPTransport()
-	defer tr.CloseIdleConnections()
-	targetList, err := notify.FetchRegisteredTargets(cfg, GlobalContext.Done(), tr, true, false)
-	if err != nil && err != notify.ErrTargetsOffline {
-		logger.LogIf(GlobalContext, err)
-		return nil
-	}
+func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {

 	lambdaMap := make(map[string][]madmin.TargetIDStatus)

-	for targetID, target := range targetList.TargetMap() {
+	for _, tgt := range globalConfigTargetList.Targets() {
 		targetIDStatus := make(map[string]madmin.Status)
-		active, _ := target.IsActive()
+		active, _ := tgt.IsActive()
+		targetID := tgt.ID()
+		if active {
+			targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
+		} else {
+			targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
+		}
+		list := lambdaMap[targetID.Name]
+		list = append(list, targetIDStatus)
+		lambdaMap[targetID.Name] = list
+	}
+
+	for _, tgt := range globalEnvTargetList.Targets() {
+		targetIDStatus := make(map[string]madmin.Status)
+		active, _ := tgt.IsActive()
+		targetID := tgt.ID()
 		if active {
 			targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
 		} else {
@@ -1554,8 +1579,6 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
 		list := lambdaMap[targetID.Name]
 		list = append(list, targetIDStatus)
 		lambdaMap[targetID.Name] = list
-		// Close any leaking connections
-		_ = target.Close()
 	}

 	notify := make([]map[string][]madmin.TargetIDStatus, len(lambdaMap))
@@ -1570,7 +1593,7 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
 }

 // fetchVaultStatus fetches Vault Info
-func fetchVaultStatus(cfg config.Config) madmin.Vault {
+func fetchVaultStatus() madmin.Vault {
 	vault := madmin.Vault{}
 	if GlobalKMS == nil {
 		vault.Status = "disabled"
@@ -1579,12 +1602,12 @@ func fetchVaultStatus() madmin.Vault {
 	keyID := GlobalKMS.DefaultKeyID()
 	kmsInfo := GlobalKMS.Info()

-	if kmsInfo.Endpoint == "" {
+	if len(kmsInfo.Endpoints) == 0 {
 		vault.Status = "KMS configured using master key"
 		return vault
 	}

-	if err := checkConnection(kmsInfo.Endpoint, 15*time.Second); err != nil {
+	if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
 		vault.Status = "offline"
 	} else {
 		vault.Status = "online"
@@ -1613,41 +1636,42 @@ func fetchVaultStatus() madmin.Vault {
 }

 // fetchLoggerDetails return log info
-func fetchLoggerInfo(cfg config.Config) ([]madmin.Logger, []madmin.Audit) {
-	loggerCfg, _ := logger.LookupConfig(cfg)
-
-	var logger []madmin.Logger
-	var auditlogger []madmin.Audit
-	for log, l := range loggerCfg.HTTP {
-		if l.Enabled {
-			err := checkConnection(l.Endpoint, 15*time.Second)
+func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
+	var loggerInfo []madmin.Logger
+	var auditloggerInfo []madmin.Audit
+	for _, target := range logger.Targets {
+		if target.Endpoint() != "" {
+			tgt := target.String()
+			err := checkConnection(target.Endpoint(), 15*time.Second)
 			if err == nil {
 				mapLog := make(map[string]madmin.Status)
-				mapLog[log] = madmin.Status{Status: "Online"}
-				logger = append(logger, mapLog)
+				mapLog[tgt] = madmin.Status{Status: "Online"}
+				loggerInfo = append(loggerInfo, mapLog)
 			} else {
 				mapLog := make(map[string]madmin.Status)
-				mapLog[log] = madmin.Status{Status: "offline"}
-				logger = append(logger, mapLog)
+				mapLog[tgt] = madmin.Status{Status: "offline"}
+				loggerInfo = append(loggerInfo, mapLog)
 			}
 		}
 	}

-	for audit, l := range loggerCfg.Audit {
-		if l.Enabled {
-			err := checkConnection(l.Endpoint, 15*time.Second)
+	for _, target := range logger.AuditTargets {
+		if target.Endpoint() != "" {
+			tgt := target.String()
+			err := checkConnection(target.Endpoint(), 15*time.Second)
 			if err == nil {
 				mapAudit := make(map[string]madmin.Status)
-				mapAudit[audit] = madmin.Status{Status: "Online"}
-				auditlogger = append(auditlogger, mapAudit)
+				mapAudit[tgt] = madmin.Status{Status: "Online"}
+				auditloggerInfo = append(auditloggerInfo, mapAudit)
 			} else {
 				mapAudit := make(map[string]madmin.Status)
-				mapAudit[audit] = madmin.Status{Status: "Offline"}
-				auditlogger = append(auditlogger, mapAudit)
+				mapAudit[tgt] = madmin.Status{Status: "Offline"}
+				auditloggerInfo = append(auditloggerInfo, mapAudit)
 			}
 		}
 	}
-	return logger, auditlogger
+
+	return loggerInfo, auditloggerInfo
 }

 // checkConnection - ping an endpoint , return err in case of no connection
@@ -1655,11 +1679,6 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(GlobalContext, timeout)
 	defer cancel()

-	req, err := http.NewRequest(http.MethodHead, endpointStr, nil)
-	if err != nil {
-		return err
-	}
-
 	client := &http.Client{Transport: &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
 		DialContext: xhttp.NewCustomDialContext(timeout),
@@ -1674,11 +1693,15 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
 	}}
 	defer client.CloseIdleConnections()

-	resp, err := client.Do(req.WithContext(ctx))
+	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := client.Do(req)
 	if err != nil {
 		return err
 	}
-	resp.Body.Close()
+	defer xhttp.DrainBody(resp.Body)
 	return nil
 }
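The `checkConnection` rewrite builds the request with `http.NewRequestWithContext` so the timeout travels with the request from the start, instead of attaching the context afterwards with `req.WithContext`, and drains the body so the connection can be reused. A self-contained sketch of the same pattern, using only the standard library:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

// ping issues a HEAD request that is cancelled automatically once the
// context deadline passes, mirroring the checkConnection change.
func ping(endpoint string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpoint, nil)
	if err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Drain and close the body so the underlying connection is reusable.
	defer resp.Body.Close()
	_, _ = io.Copy(io.Discard, resp.Body)
	return nil
}

func main() {
	fmt.Println(ping("https://min.io", 15*time.Second))
}
```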
@@ -327,13 +327,13 @@ func TestExtractHealInitParams(t *testing.T) {
 	mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
 		v := url.Values{}
 		if clientToken != "" {
-			v.Add(string(mgmtClientToken), clientToken)
+			v.Add(mgmtClientToken, clientToken)
 		}
 		if forceStart {
-			v.Add(string(mgmtForceStart), "")
+			v.Add(mgmtForceStart, "")
 		}
 		if forceStop {
-			v.Add(string(mgmtForceStop), "")
+			v.Add(mgmtForceStop, "")
 		}
 		return v
 	}
@@ -351,11 +351,11 @@ func TestExtractHealInitParams(t *testing.T) {
 	}
 	varsArr := []map[string]string{
 		// Invalid cases
-		{string(mgmtPrefix): "objprefix"},
+		{mgmtPrefix: "objprefix"},
 		// Valid cases
 		{},
-		{string(mgmtBucket): "bucket"},
-		{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
+		{mgmtBucket: "bucket"},
+		{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
 	}

 	// Body is always valid - we do not test JSON decoding.
@@ -85,17 +85,18 @@ type healSequenceStatus struct {

// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.Mutex
sync.RWMutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healLocalDisks []Endpoints
healLocalDisks map[Endpoint]struct{}
}

// newHealState - initialize global heal state management
func newHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
}

go healState.periodicHealSeqsClean(GlobalContext)
@@ -103,20 +104,40 @@ func newHealState() *allHealState {
return healState
}

func (ahs *allHealState) getHealLocalDisks() []Endpoints {
ahs.Lock()
defer ahs.Unlock()
func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()

healLocalDisks := make([]Endpoints, len(ahs.healLocalDisks))
copy(healLocalDisks, ahs.healLocalDisks)
return healLocalDisks
return len(ahs.healLocalDisks)
}

func (ahs *allHealState) updateHealLocalDisks(healLocalDisks []Endpoints) {
func (ahs *allHealState) getHealLocalDisks() Endpoints {
ahs.RLock()
defer ahs.RUnlock()

var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
}
return endpoints
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

ahs.healLocalDisks = healLocalDisks
for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}
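
Switching healLocalDisks from []Endpoints to map[Endpoint]struct{} turns the field into a set, which is what makes the new push/pop operations idempotent. A reduced sketch of the same pattern, with string keys standing in for the MinIO-specific Endpoint type:

```go
package main

import (
	"fmt"
	"sync"
)

// endpointSet mirrors the map-backed set guarded by an RWMutex.
type endpointSet struct {
	sync.RWMutex
	disks map[string]struct{}
}

func (s *endpointSet) push(eps ...string) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		s.disks[ep] = struct{}{} // duplicate pushes are harmless
	}
}

func (s *endpointSet) pop(eps ...string) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		delete(s.disks, ep)
	}
}

func (s *endpointSet) count() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.disks)
}

func main() {
	s := &endpointSet{disks: map[string]struct{}{}}
	s.push("/disk1", "/disk1", "/disk2") // set semantics: /disk1 counted once
	s.pop("/disk2")
	fmt.Println(s.count()) // 1
}
```
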
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
@@ -507,7 +528,7 @@ func (h *healSequence) hasEnded() bool {
if h.clientToken == bgHealingUUID {
return false
}
return len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
return !h.endTime.IsZero()
}

// stops the heal sequence - safe to call multiple times.
@@ -647,6 +668,12 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if source.opts != nil {
task.opts = *source.opts
}

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)

select {
@@ -729,9 +756,6 @@ func (h *healSequence) healItemsFromSourceCh() error {
pathJoin(source.bucket, source.object), err))
}
}

h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
@@ -784,7 +808,7 @@ func (h *healSequence) traverseAndHeal() {
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -820,7 +844,7 @@ func (h *healSequence) healDiskFormat() error {
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -840,7 +864,7 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -862,13 +886,15 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}

if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
return err
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

if bucketsOnly {
@@ -882,6 +908,9 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
oi, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -891,7 +920,11 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
}

if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}
@@ -899,7 +932,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
@@ -913,10 +946,5 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
object: object,
versionID: versionID,
}, madmin.HealItemObject)
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
@@ -180,19 +180,19 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}
}
// -- Top APIs --
// Top locks
if globalIsDistErasure {
@@ -212,22 +212,12 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler))
}
}

// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(errorResponseHandler)
}
@@ -29,7 +29,7 @@ import (

minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
@@ -106,15 +106,17 @@ const (
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrReplicationDestinationNotFoundError
ErrRemoteDestinationNotFoundError
ErrReplicationDestinationMissingLock
ErrReplicationTargetNotFoundError
ErrRemoteTargetNotFoundError
ErrReplicationRemoteConnectionError
ErrBucketRemoteIdenticalToSource
ErrBucketRemoteAlreadyExists
ErrBucketRemoteLabelInUse
ErrBucketRemoteArnTypeInvalid
ErrBucketRemoteArnInvalid
ErrBucketRemoteRemoveDisallowed
ErrReplicationTargetNotVersionedError
ErrRemoteTargetNotVersionedError
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
@@ -808,9 +810,9 @@ var errorCodes = errorCodeMap{
Description: "The replication configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationNotFoundError: {
Code: "ReplicationDestinationNotFoundError",
Description: "The replication destination bucket does not exist",
ErrRemoteDestinationNotFoundError: {
Code: "RemoteDestinationNotFoundError",
Description: "The remote destination bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationMissingLock: {
@@ -818,24 +820,34 @@ var errorCodes = errorCodeMap{
Description: "The replication destination bucket does not have object locking enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationTargetNotFoundError: {
Code: "XminioAdminReplicationTargetNotFoundError",
Description: "The replication target does not exist",
ErrRemoteTargetNotFoundError: {
Code: "XMinioAdminRemoteTargetNotFoundError",
Description: "The remote target does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service endpoint or target bucket not available",
HTTPStatusCode: http.StatusNotFound,
},
ErrBucketRemoteIdenticalToSource: {
Code: "XminioAdminRemoteIdenticalToSource",
Code: "XMinioAdminRemoteIdenticalToSource",
Description: "The remote target cannot be identical to source",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteAlreadyExists: {
Code: "XminioAdminBucketRemoteAlreadyExists",
Code: "XMinioAdminBucketRemoteAlreadyExists",
Description: "The remote target already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteLabelInUse: {
Code: "XMinioAdminBucketRemoteLabelInUse",
Description: "The remote target with this label already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteRemoveDisallowed: {
Code: "XMinioAdminRemoteRemoveDisallowed",
Description: "Replication configuration exists with this ARN.",
Description: "This ARN is in use by an existing configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteArnTypeInvalid: {
@@ -848,9 +860,9 @@ var errorCodes = errorCodeMap{
Description: "The bucket remote ARN does not have correct format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationTargetNotVersionedError: {
Code: "ReplicationTargetNotVersionedError",
Description: "The replication target does not have versioning enabled",
ErrRemoteTargetNotVersionedError: {
Code: "RemoteTargetNotVersionedError",
Description: "The remote target does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationSourceNotVersionedError: {
@@ -965,7 +977,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrMetadataTooLarge: {
Code: "InvalidArgument",
Code: "MetadataTooLarge",
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -1900,22 +1912,26 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketReplicationConfigNotFound:
apiErr = ErrReplicationConfigurationNotFoundError
case BucketReplicationDestinationNotFound:
apiErr = ErrReplicationDestinationNotFoundError
case BucketRemoteDestinationNotFound:
apiErr = ErrRemoteDestinationNotFoundError
case BucketReplicationDestinationMissingLock:
apiErr = ErrReplicationDestinationMissingLock
case BucketRemoteTargetNotFound:
apiErr = ErrReplicationTargetNotFoundError
apiErr = ErrRemoteTargetNotFoundError
case BucketRemoteConnectionErr:
apiErr = ErrReplicationRemoteConnectionError
case BucketRemoteAlreadyExists:
apiErr = ErrBucketRemoteAlreadyExists
case BucketRemoteLabelInUse:
apiErr = ErrBucketRemoteLabelInUse
case BucketRemoteArnTypeInvalid:
apiErr = ErrBucketRemoteArnTypeInvalid
case BucketRemoteArnInvalid:
apiErr = ErrBucketRemoteArnInvalid
case BucketRemoteRemoveDisallowed:
apiErr = ErrBucketRemoteRemoveDisallowed
case BucketReplicationTargetNotVersioned:
apiErr = ErrReplicationTargetNotVersionedError
case BucketRemoteTargetNotVersioned:
apiErr = ErrRemoteTargetNotVersionedError
case BucketReplicationSourceNotVersioned:
apiErr = ErrReplicationSourceNotVersionedError
case BucketQuotaExceeded:
@@ -1948,6 +1964,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrBackendDown
case ObjectNameTooLong:
apiErr = ErrKeyTooLongError
case dns.ErrInvalidBucketName:
apiErr = ErrInvalidBucketName
default:
var ie, iw int
// This work-around is to handle the issue golang/go#30648
@@ -1984,6 +2002,12 @@ func toAPIError(ctx context.Context, err error) APIError {
}

var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)
apiErr = errorCodes.ToAPIErrWithErr(code, e)
}

if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on
@@ -84,7 +84,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)

@@ -147,15 +147,26 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}
}

var start, rangeLen int64
totalObjectSize, err := objInfo.GetActualSize()
if err != nil {
return err
}

// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
if opts.PartNumber > 0 {
var start, end int64
for i := 0; i < len(objInfo.Parts) && i < opts.PartNumber; i++ {
start = end
end = start + objInfo.Parts[i].ActualSize - 1
}
rs = &HTTPRangeSpec{Start: start, End: end}
rangeLen = end - start + 1
} else {
// for providing ranged content
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
}

// Set content length.
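
For a partNumber request, the hunk above derives an HTTPRangeSpec by summing the actual sizes of the preceding parts. A simplified, self-contained sketch of that mapping (part sizes are made up, and the exact boundary arithmetic may differ from the handler):

```go
package main

import "fmt"

// partRange returns the inclusive byte range covered by part n (1-based),
// given the decompressed ("actual") size of each part.
func partRange(sizes []int64, n int) (start, end int64) {
	for i := 0; i < len(sizes) && i < n; i++ {
		start = end
		if i > 0 {
			start++ // next part begins one byte after the previous end
		}
		end = start + sizes[i] - 1
	}
	return start, end
}

func main() {
	sizes := []int64{5 << 20, 5 << 20, 1 << 20} // three parts: 5MiB, 5MiB, 1MiB
	s, e := partRange(sizes, 2)
	fmt.Println(s, e, e-s+1) // 5242880 10485759 5242880
}
```
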
@@ -36,7 +36,7 @@ import (
const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
maxObjectList = 10000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
@@ -408,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var listbuckets []Bucket
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}

@@ -428,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var data = ListVersionsResponse{}

@@ -473,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
data.VersionIDMarker = versionIDMarker
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -484,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var data = ListObjectsResponse{}

@@ -517,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys

data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -531,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}

@@ -585,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated

commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
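
The response generators above now preallocate their slices with make([]T, 0, n) instead of declaring nil slices, avoiding repeated regrowth and guaranteeing a non-nil (if empty) slice. A small sketch of the difference:

```go
package main

import "fmt"

func main() {
	var nilSlice []string                // nil until first append
	preallocated := make([]string, 0, 3) // non-nil, single allocation up front

	fmt.Println(nilSlice == nil, preallocated == nil) // true false

	for _, v := range []string{"a", "b", "c"} {
		preallocated = append(preallocated, v) // never reallocates: capacity is 3
	}
	fmt.Println(len(preallocated), cap(preallocated)) // 3 3
}
```
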
@@ -702,10 +703,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
if newObjectLayerFn() == nil {
// Server still in safe mode.
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
}
setCommonHeaders(w)
if mType != mimeNone {
w.Header().Set(xhttp.ContentType, string(mType))
@@ -772,10 +769,6 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// The request is from browser and also if browser
// is enabled we need to redirect.
if browser && globalBrowserEnabled {
if newObjectLayerFn() == nil {
// server still in safe mode.
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
}
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
@@ -17,6 +17,7 @@
package cmd

import (
"net"
"net/http"

"github.com/gorilla/mux"
@@ -31,28 +32,15 @@ func newHTTPServerFn() *xhttp.Server {
return globalHTTPServer
}

func newObjectLayerWithoutSafeModeFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalObjectAPI
}

func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalObjectAPI
}

func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()

if globalSafeMode {
return nil
}
return globalCacheObjectAPI
}

@@ -60,31 +48,53 @@ func newCachedObjectLayerFn() CacheObjectLayer {
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true of handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}

// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
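
getHost above prefers r.URL.Host only for absolute request URIs; everything else falls back to r.Host, which per RFC 2616 may still carry a port. A sketch of splitting that value safely (hostnames are illustrative):

```go
package main

import (
	"fmt"
	"net"
)

// hostOnly strips an optional :port, falling back to the raw value
// when no port is present (net.SplitHostPort errors in that case).
func hostOnly(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return hostport
	}
	return host
}

func main() {
	fmt.Println(hostOnly("bucket.minio.example.com:9000")) // bucket.minio.example.com
	fmt.Println(hostOnly("bucket.minio.example.com"))      // unchanged
}
```
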
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}

// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()

var routers []*mux.Router
for _, domainName := range globalDomainNames {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching minio.<domain>` this is
// specifically meant for operator/k8s deployment
// The reason we need to skip this is for a special
// usecase where we need to make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by the bucketDNS style to ensure that path style
// is available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// makes sure that buckets are routed through this matcher
// to match for `<bucket>`
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
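
The Kubernetes branch above registers the host-pattern route behind a MatcherFunc so that the reserved minio.<domain> name keeps resolving path-style. A compact sketch of the same gorilla/mux technique (the domain and handler below are placeholders):

```go
package main

import (
	"fmt"
	"net"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	const domain = "ns.svc.cluster.local"
	r := mux.NewRouter()

	// Match {bucket}.<domain> vhosts, but never the reserved "minio" host,
	// so path-style requests to minio.<domain> fall through to other routes.
	r.MatcherFunc(func(req *http.Request, _ *mux.RouteMatch) bool {
		host, _, err := net.SplitHostPort(req.Host)
		if err != nil {
			host = req.Host
		}
		return host != "minio."+domain
	}).Host("{bucket:.+}." + domain).HandlerFunc(
		func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintln(w, "bucket:", mux.Vars(req)["bucket"])
		})

	http.ListenAndServe(":8080", r)
}
```
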
@@ -94,7 +104,10 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
@@ -138,7 +151,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
@@ -305,8 +319,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))

// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler))

}
@@ -346,8 +346,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -360,6 +378,22 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(policy.ListBucketAction),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
}
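
Both hunks above implement the same fallback: a ListBucketVersions request is also satisfied by a policy that grants only s3:ListBucket, matching AWS behavior. The shape of that check, reduced to a sketch with a pluggable allow function:

```go
package main

import "fmt"

// allowedWithFallback grants the requested action, or, for
// ListBucketVersions, retries the check as plain ListBucket.
func allowedWithFallback(isAllowed func(action string) bool, action string) bool {
	if isAllowed(action) {
		return true
	}
	if action == "s3:ListBucketVersions" {
		// In AWS S3 the s3:ListBucket permission implies
		// s3:ListBucketVersions, so verify it as a fallback.
		return isAllowed("s3:ListBucket")
	}
	return false
}

func main() {
	policy := map[string]bool{"s3:ListBucket": true}
	isAllowed := func(a string) bool { return policy[a] }
	fmt.Println(allowedWithFallback(isAllowed, "s3:ListBucketVersions")) // true
}
```
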
@@ -21,7 +21,6 @@ import (
"path"
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)

@@ -101,6 +100,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
ObjectPathUpdated(path.Join(task.bucket, task.object))
}
task.responseCh <- healResult{result: res, err: err}

case <-h.doneCh:
return
case <-ctx.Done():
@@ -128,24 +128,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
return madmin.HealResultItem{}, err
}

// Healing succeeded notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
// Notify servers in background and retry if needed.
go func() {
retry:
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
if nerr.Err.Error() == errServerNotInitialized.Error() {
time.Sleep(time.Second)
goto retry
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}()
}

return res, nil
}
@@ -26,7 +26,17 @@ import (
"github.com/minio/minio/cmd/logger"
)

const defaultMonitorNewDiskInterval = time.Minute * 3
const (
defaultMonitorNewDiskInterval = time.Second * 10
healingTrackerFilename = ".healing.bin"
)

//go:generate msgp -file $GOFILE -unexported
type healingTracker struct {
ID string

// future add more tracking capabilities
}

func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureZones)
@@ -36,15 +46,6 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {

initBackgroundHealing(ctx, objAPI) // start quick background healing

localDisksInZoneHeal := getLocalDisksToHeal(objAPI)
globalBackgroundHealState.updateHealLocalDisks(localDisksInZoneHeal)

drivesToHeal := getDrivesToHealCount(localDisksInZoneHeal)
if drivesToHeal != 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
drivesToHeal, defaultMonitorNewDiskInterval))
}

var bgSeq *healSequence
var found bool

@@ -56,7 +57,12 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
time.Sleep(time.Second)
}

if drivesToHeal != 0 {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
if err := bgSeq.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
@@ -67,49 +73,29 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
}
}

go monitorLocalDisksAndHeal(ctx, z, drivesToHeal, localDisksInZoneHeal, bgSeq)
go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}

func getLocalDisksToHeal(objAPI ObjectLayer) []Endpoints {
z, ok := objAPI.(*erasureZones)
if !ok {
return nil
}

// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
localDisksToHeal = append(localDisksToHeal, endpoint)
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
if len(localDisksToHeal) == 0 {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
}
return localDisksInZoneHeal
return disksToHeal

}

func getDrivesToHealCount(localDisksInZoneHeal []Endpoints) int {
var drivesToHeal int
for _, eps := range localDisksInZoneHeal {
for range eps {
drivesToHeal++
}
}
return drivesToHeal
}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
@@ -121,76 +107,79 @@ func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, drivesToHeal int, localDisksInZoneHeal []Endpoints, bgSeq *healSequence) {
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healSequence) {
// Perform automatic disk healing when a disk is replaced locally.
for {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// heal only if new disks found.
if drivesToHeal == 0 {
localDisksInZoneHeal = getLocalDisksToHeal(z)
drivesToHeal = getDrivesToHealCount(localDisksInZoneHeal)
if drivesToHeal == 0 {
// No drives to heal.
globalBackgroundHealState.updateHealLocalDisks(nil)
continue
}
globalBackgroundHealState.updateHealLocalDisks(localDisksInZoneHeal)
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
drivesToHeal))
var erasureSetInZoneDisksToHeal []map[int][]StorageAPI

healDisks := globalBackgroundHealState.getHealLocalDisks()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}
}

var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
len(healDisks)))

// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
printEndpointError(endpoint, err, false)
continue
}

erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}

logger.Info("New unformatted drives detected attempting to heal the content...")
for i, disks := range localDisksInZoneHeal {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.zones))
for i := range z.zones {
erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
}
}

// Heal all erasure sets that need
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
}
// heal only if new disks found.
for _, endpoint := range healDisks {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}

// Only upon success reduce the counter
if err == nil {
drivesToHeal--
zoneIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
if zoneIdx < 0 {
continue
}

// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[zoneIdx].format, format)
if err != nil {
printEndpointError(endpoint, err, false)
continue
}

erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
}

buckets, _ := z.ListBucketsHeal(ctx)
for i, setMap := range erasureSetInZoneDisksToHeal {
for setIndex, disks := range setMap {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))

lbDisks := z.zones[i].sets[setIndex].getLoadBalancedNDisks(z.zones[i].listTolerancePerSet)
if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s zone complete", disk, humanize.Ordinal(i+1))

if err := disk.DeleteFile(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
healingTrackerFilename); err != nil {
logger.LogIf(ctx, err)
continue
}

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}
}
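
The rewritten monitor above wakes on every defaultMonitorNewDiskInterval tick, snapshots the heal set, heals each queued disk, and pops only the disks that healed cleanly. The control flow reduced to a sketch (probe and heal functions are stand-ins for connectEndpoint and healErasureSet):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// monitor retries every queued disk each tick; a disk leaves the queue
// only after a successful heal, mirroring popHealLocalDisks above.
func monitor(ctx context.Context, interval time.Duration,
	pending func() []string, heal func(string) error, pop func(string)) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(interval):
			for _, disk := range pending() { // snapshot of disks queued to heal
				if err := heal(disk); err != nil {
					continue // keep the disk queued; retry next tick
				}
				pop(disk) // only a successful heal removes it
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	queue := []string{"/disk1"}
	monitor(ctx, 10*time.Millisecond,
		func() []string { return queue },
		func(d string) error { fmt.Println("healed", d); return nil },
		func(d string) { queue = nil })
}
```
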
cmd/background-newdisks-heal-ops_gen.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
return
}
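
The //go:generate msgp directive produced the codecs above, so healingTracker round-trips through MessagePack without reflection. A hedged sketch of persisting the tracker with these generated methods; it assumes package cmd, and the flat file path is illustrative - the server keeps ".healing.bin" under the disk's metadata prefix rather than at an arbitrary location:

```go
package cmd

import "io/ioutil"

// saveHealingTracker serializes the tracker with the generated MarshalMsg.
func saveHealingTracker(path, id string) error {
	buf, err := healingTracker{ID: id}.MarshalMsg(nil) // generated codec
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, buf, 0644)
}

// loadHealingTracker reads it back with the generated UnmarshalMsg.
func loadHealingTracker(path string) (healingTracker, error) {
	var t healingTracker
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		return t, err
	}
	_, err = t.UnmarshalMsg(buf) // returns any trailing bytes, ignored here
	return t, err
}
```
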
cmd/background-newdisks-heal-ops_gen_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"bytes"
"testing"

"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalhealingTracker(t *testing.T) {
v := healingTracker{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}

left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}

func BenchmarkMarshalMsghealingTracker(b *testing.B) {
v := healingTracker{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}

func BenchmarkAppendMsghealingTracker(b *testing.B) {
v := healingTracker{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}

func BenchmarkUnmarshalhealingTracker(b *testing.B) {
v := healingTracker{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

func TestEncodeDecodehealingTracker(t *testing.T) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)

m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
}

vn := healingTracker{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}

buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}

func BenchmarkEncodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}

func BenchmarkDecodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
@@ -18,6 +18,7 @@ package cmd

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -80,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(volume, filePath, totalFileSize, r)
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
@@ -118,7 +119,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
return 0, err
}
@@ -139,8 +140,8 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err := &errHashMismatch{fmt.Sprintf("Disk: %s - content hash does not match - expected %s, got %s",
b.disk, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
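
The streaming bitrot reader above verifies every shard it returns by recomputing the shard hash and comparing it with the stored checksum. The core check as a standalone sketch, with SHA-256 standing in for the server's configurable bitrot algorithm:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyShard recomputes the shard digest and flags any mismatch,
// which is how silent disk corruption (bitrot) is detected on read.
func verifyShard(shard, storedSum []byte) error {
	sum := sha256.Sum256(shard)
	if !bytes.Equal(sum[:], storedSum) {
		return fmt.Errorf("content hash does not match - expected %x, got %x", storedSum, sum)
	}
	return nil
}

func main() {
	shard := []byte("shard-data")
	sum := sha256.Sum256(shard)
	fmt.Println(verifyShard(shard, sum[:]))        // <nil>
	shard[0] ^= 0xff                               // simulate a flipped bit on disk
	fmt.Println(verifyShard(shard, sum[:]) != nil) // true
}
```
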
@@ -17,6 +17,7 @@
package cmd

import (
"context"
"fmt"
"hash"
"io"
@@ -34,7 +35,7 @@ type wholeBitrotWriter struct {
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
@@ -69,13 +70,13 @@ type wholeBitrotReader struct {
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, errLessData)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
return 0, errLessData
}
n = copy(buf, b.buf)

@@ -17,6 +17,7 @@
package cmd

import (
"context"
"io"
"io/ioutil"
"log"
@@ -34,12 +35,12 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
volume := "testvol"
filePath := "testfile"

disk, err := newXLStorage(tmpDir, "")
disk, err := newLocalXLStorage(tmpDir)
if err != nil {
t.Fatal(err)
}

disk.MakeVol(volume)
disk.MakeVol(context.Background(), volume)

writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
@@ -131,7 +131,7 @@ func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method s
values = make(url.Values)
}

respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
@@ -178,17 +178,22 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
}
onlineServers++
}
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(500 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
select {
case <-ctx.Done():
return ctx.Err()
default:
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(100 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
}
offlineEndpoints = nil
}
return nil
}
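
The bootstrap loop above now checks ctx.Done() before each backoff sleep, so shutdown no longer has to wait out a retry. The pattern in isolation, reusing the diff's sleep and log cadence (the readiness predicate is a placeholder):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitUntil polls ready() with a short backoff, bailing out promptly
// when the context is cancelled instead of sleeping through it.
func waitUntil(ctx context.Context, ready func() bool) error {
	retries := 0
	for !ready() {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			time.Sleep(100 * time.Millisecond) // avoid a hot spin loop
			retries++
			if retries >= 5 {
				fmt.Println("still waiting for remote servers...")
				retries = 0 // reset so the message repeats periodically
			}
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := waitUntil(ctx, func() bool { return false })
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```
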
@@ -228,13 +233,13 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
}
}

trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultTimeout)
restClient := rest.NewClient(serverURL, trFn, newAuthToken)
restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
// Instantiate a new rest client for healthcheck
// to avoid recursive healthCheckFn()
respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).CallWithContext(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).Call(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
cancel()
var ne *rest.NetworkError

@@ -34,7 +34,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -33,7 +33,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"

@@ -72,7 +72,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && err != dns.ErrNoEntriesFound && err != dns.ErrNotImplemented {
logger.LogIf(GlobalContext, err)
return
}

@@ -80,33 +80,35 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
bucketsSet := set.NewStringSet()
bucketsToBeUpdated := set.NewStringSet()
bucketsInConflict := set.NewStringSet()
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
continue
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}

// Add/update buckets that are not registered with the DNS

@@ -449,14 +451,15 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})

deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[deleteList[i]]
apiErr := toAPIError(ctx, errs[i])
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" || apiErr.Code == "InvalidArgument" {
deletedObjects[dindex] = dObjects[i]
continue
}

@@ -561,7 +564,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
if globalDNSConfig != nil {
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if err == dns.ErrNoEntriesFound {
// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
// exists elsewhere
if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))

@@ -654,16 +659,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {

if !objectAPI.IsEncryptionSupported() && crypto.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}

bucket := mux.Vars(r)["bucket"]

// To detect if the client has disconnected.
r.Body = &contextReader{r.Body, r.Context()}

// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {

@@ -804,7 +807,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
// This request header needs to be set prior to setting ObjectOptions
if !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
r.Header.Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
}

@@ -999,7 +1002,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -31,7 +31,7 @@ type LifecycleSys struct{}
// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -27,6 +27,7 @@ import (
"github.com/minio/minio/cmd/logger"

"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/sync/errgroup"
)

@@ -93,7 +94,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

@@ -113,7 +114,12 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

if proxyRequestByBucket(ctx, w, r, bucket) {
// Forward the request using Source IP or bucket
forwardStr := handlers.GetSourceIPFromHeaders(r)
if forwardStr == "" {
forwardStr = bucket
}
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
return
}

@@ -340,8 +346,8 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
return proxyRequest(ctx, w, r, ep)
}

func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(bucket, len(globalProxyEndpoints)))
func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
}
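crcHashMod pins a request to one proxy endpoint by hashing a stable key; keying on the client source IP (falling back to the bucket name) keeps a client's paginated listing on a single node. A minimal sketch of that selection, using a hypothetical pickEndpoint helper rather than MinIO's crcHashMod:

package main

import (
	"fmt"
	"hash/crc32"
)

// pickEndpoint maps an arbitrary string onto one of n endpoints.
// Equal inputs always pick the same index, so repeated requests
// keyed by client IP (or bucket) land on the same node.
func pickEndpoint(key string, n int) int {
	if n <= 0 {
		return -1
	}
	return int(crc32.ChecksumIEEE([]byte(key)) % uint32(n))
}

func main() {
	endpoints := []string{"node-1", "node-2", "node-3"}
	for _, key := range []string{"10.0.0.5", "mybucket", "10.0.0.5"} {
		fmt.Println(key, "->", endpoints[pickEndpoint(key, len(endpoints))])
	}
}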
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.

@@ -382,7 +388,12 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

if proxyRequestByBucket(ctx, w, r, bucket) {
// Forward the request using Source IP or bucket
forwardStr := handlers.GetSourceIPFromHeaders(r)
if forwardStr == "" {
forwardStr = bucket
}
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
return
}

@@ -72,7 +72,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
// Update update bucket metadata for the specified config file.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}

@@ -81,7 +81,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
// This code is needed only for gateway implementations.
switch configFile {
case bucketSSEConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err

@@ -90,7 +90,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketLifecycleConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err

@@ -99,7 +99,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketTaggingConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err

@@ -108,7 +108,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketNotificationConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err

@@ -273,9 +273,9 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
if globalIsGateway && globalGatewayName == "nas" {
if globalIsGateway && globalGatewayName == NASBackendGateway {
// Only needed in case of NAS gateway.
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -313,7 +313,7 @@ func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEC
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -376,7 +376,7 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return newBucketMetadata(bucket), errServerNotInitialized
}

@@ -107,6 +107,10 @@ func newBucketMetadata(name string) BucketMetadata {
// Load - loads the metadata of bucket by name from ObjectLayer api.
// If an error is returned the returned metadata will be default initialized.
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
if name == "" {
logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
return errors.New("bucket name cannot be empty")
}
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, api, configFile)
if err != nil {

@@ -128,20 +132,22 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
}
// OK, parse data.
_, err = b.UnmarshalMsg(data[4:])
b.Name = name // in-case parsing failed for some reason, make sure bucket name is not empty.
return err
}

// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
b := newBucketMetadata(bucket)
err := b.Load(ctx, objectAPI, bucket)
err := b.Load(ctx, objectAPI, b.Name)
if err == nil {
return b, b.convertLegacyConfigs(ctx, objectAPI)
}

if err != errConfigNotFound {
if !errors.Is(err, errConfigNotFound) {
return b, err
}

// Old bucket without bucket metadata. Hence we migrate existing settings.
return b, b.convertLegacyConfigs(ctx, objectAPI)
}

@@ -354,7 +360,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {

// deleteBucketMetadata deletes bucket metadata
// If config does not exist no error is returned.
func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
metadataFiles := []string{
dataUsageCacheName,
bucketMetadataFile,

@@ -33,7 +33,7 @@ type BucketObjectLockSys struct{}
// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return r, errServerNotInitialized
}

@@ -151,9 +151,10 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
if ok {
// Special case for AD/LDAP STS users
if k == ldapUser {
args[ldapUserPolicyVariable] = []string{vStr}
args["user"] = []string{vStr}
} else {
args[k] = []string{vStr}
}
args[k] = []string{vStr}
}
}

@@ -63,7 +63,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}

func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}

@@ -71,6 +71,8 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 1 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})
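The quota check above lazily wires a 1-second TTL cache around loadDataUsageFromBackend via sync.Once, so per-request quota checks do not rescan data usage. A generic sketch of that once-initialized TTL cache; the ttlCache type is illustrative, not MinIO's internal implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache memoizes the result of Update for TTL, refreshing lazily
// on the first read after the entry expires.
type ttlCache struct {
	mu     sync.Mutex
	Once   sync.Once
	TTL    time.Duration
	Update func() (interface{}, error)

	lastUpdate time.Time
	value      interface{}
}

func (c *ttlCache) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value != nil && time.Since(c.lastUpdate) < c.TTL {
		return c.value, nil // still fresh, serve cached value
	}
	v, err := c.Update()
	if err != nil {
		return nil, err
	}
	c.value, c.lastUpdate = v, time.Now()
	return v, nil
}

func main() {
	calls := 0
	c := &ttlCache{}
	c.Once.Do(func() { // one-time wiring, as in the hunk above
		c.TTL = time.Second
		c.Update = func() (interface{}, error) { calls++; return calls, nil }
	})
	a, _ := c.Get()
	b, _ := c.Get() // served from cache within the TTL
	fmt.Println(a, b, "updates:", calls)
}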
@@ -18,7 +18,9 @@ package cmd

import (
"context"
"fmt"
"net/http"
"runtime"
"strings"
"time"

@@ -36,7 +38,7 @@ import (
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

@@ -50,12 +52,12 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati
// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.RoleArn)
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rCfg.RoleArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
return false, BucketRemoteDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {

@@ -85,7 +87,7 @@ func mustReplicateWeb(ctx context.Context, r *http.Request, bucket, object strin

// mustReplicate returns true if object meets replication criteria.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, "", r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
return mustReplicater(ctx, r, bucket, object, meta, replStatus)

@@ -137,15 +139,17 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp
sc = objInfo.StorageClass
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
ReplicationVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationMTime: objInfo.ModTime,
ReplicationETag: objInfo.ETag,
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
},
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
rmode := miniogo.RetentionMode(mode)
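The hunk above moves replication bookkeeping from top-level Replication* options into minio-go's Internal field. A sketch of constructing such options using only the field names shown in the diff; verify against the minio-go v7 release actually vendored before relying on them:

package main

import (
	"fmt"
	"time"

	miniogo "github.com/minio/minio-go/v7"
)

// replicaPutOpts mirrors the construction above: ordinary object
// attributes stay top-level, replication bookkeeping moves under
// Internal (miniogo.AdvancedPutOptions).
func replicaPutOpts(contentType, versionID, etag string, modTime time.Time) miniogo.PutObjectOptions {
	return miniogo.PutObjectOptions{
		ContentType: contentType,
		Internal: miniogo.AdvancedPutOptions{
			SourceVersionID:   versionID,
			ReplicationStatus: miniogo.ReplicationStatusReplica,
			SourceMTime:       modTime,
			SourceETag:        etag,
		},
	}
}

func main() {
	opts := replicaPutOpts("text/plain", "version-id-1", "etag-1", time.Now())
	fmt.Println(opts.ContentType, opts.Internal.SourceVersionID)
}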
@@ -169,38 +173,52 @@ func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOp

// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
bucket := objInfo.Bucket
object := objInfo.Name

cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.RoleArn)
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil {
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})

gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
})
if err != nil {
return
}
defer gr.Close()
objInfo := gr.ObjInfo

objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
gr.Close()
return
}

dest := cfg.GetDestination()
if dest.Bucket == "" {
gr.Close()
return
}

// if heal encounters a pending replication status, either replication
// has failed due to server shutdown or crawler and PutObject replication are in contention.
healPending := objInfo.ReplicationStatus == replication.Pending

// In the rare event that replication is in pending state either due to
// server shut down/crash before replication completed or healing and PutObject
// race - do an additional stat to see if the version ID exists
if healPending {
_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {
gr.Close()
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return

@@ -210,29 +228,35 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa

replicationStatus := replication.Complete
_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)
gr.Close()
if err != nil {
replicationStatus = replication.Failed
// Notify replication failure event.
if eventArg == nil {
eventArg = &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
}
}
eventArg.EventName = event.OperationReplicationFailed
eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
sendEvent(*eventArg)
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}

// FIXME: add support for missing replication events
// - event.ObjectReplicationNotTracked
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
if replicationStatus == replication.Failed {
sendEvent(eventArgs{
EventName: event.ObjectReplicationFailed,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
}

objInfo.metadataOnly = true // Perform only metadata updates.
if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
logger.LogIf(ctx, err)
}, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
}
}

@@ -258,3 +282,69 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
delKey(xhttp.AmzBucketReplicationStatus)
return dst
}

type replicationState struct {
// add future metrics here
replicaCh chan ObjectInfo
}

func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
select {
case r.replicaCh <- oi:
default:
}
}

var (
globalReplicationState *replicationState
// TODO: currently keeping it conservative
// but eventually can be tuned in future,
// take only half the CPUs for replication
// conservatively.
globalReplicationConcurrent = runtime.GOMAXPROCS(0) / 2
)

func newReplicationState() *replicationState {

// fix minimum concurrent replication to 1 for single CPU setup
if globalReplicationConcurrent == 0 {
globalReplicationConcurrent = 1
}
rs := &replicationState{
replicaCh: make(chan ObjectInfo, 10000),
}
go func() {
<-GlobalContext.Done()
close(rs.replicaCh)
}()
return rs
}

// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
// Add a new worker.
go func() {
for {
select {
case <-ctx.Done():
return
case oi, ok := <-r.replicaCh:
if !ok {
return
}
replicateObject(ctx, oi, objectAPI)
}
}
}()
}

func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
if globalReplicationState == nil {
return
}

// Start with globalReplicationConcurrent.
for i := 0; i < globalReplicationConcurrent; i++ {
globalReplicationState.addWorker(ctx, objectAPI)
}
}
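queueReplicaTask above does a non-blocking send, so a full 10000-entry buffer drops the task rather than stalling PutObject, and each addWorker goroutine drains the channel until the context is cancelled or the channel closes. A self-contained sketch of this bounded worker-pool pattern with illustrative types:

package main

import (
	"context"
	"fmt"
	"runtime"
	"sync"
	"time"
)

type pool struct {
	tasks chan string
}

// enqueue never blocks the producer: if the buffer is full, the task
// is silently dropped, mirroring queueReplicaTask above.
func (p *pool) enqueue(t string) {
	select {
	case p.tasks <- t:
	default: // queue full, drop
	}
}

// worker drains tasks until the context is cancelled or the channel closes.
func (p *pool) worker(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case t, ok := <-p.tasks:
			if !ok {
				return
			}
			fmt.Println("replicating", t)
		}
	}
}

func main() {
	n := runtime.GOMAXPROCS(0) / 2
	if n == 0 {
		n = 1 // at least one worker on single-CPU setups
	}
	ctx, cancel := context.WithCancel(context.Background())
	p := &pool{tasks: make(chan string, 8)}
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go p.worker(ctx, &wg)
	}
	p.enqueue("bucket/object-v1")
	time.Sleep(50 * time.Millisecond)
	cancel()
	wg.Wait()
}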
@@ -21,6 +21,7 @@ import (
"net/http"
"sync"

minio "github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/pkg/bucket/versioning"

@@ -91,16 +92,15 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
}
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
if err != nil {
if isErrBucketNotFound(err) {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketReplicationTargetNotVersioned{Bucket: tgt.TargetBucket}
}
return err
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
}
}

sys.Lock()
defer sys.Unlock()

@@ -112,6 +112,9 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
if t.Arn == tgt.Arn {
return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
}
if t.Label == tgt.Label {
return BucketRemoteLabelInUse{Bucket: t.TargetBucket}
}
newtgts[idx] = *tgt
found = true
continue

@@ -172,8 +175,8 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
return nil
}

// GetReplicationTargetClient returns minio-go client for replication target instance
func (sys *BucketTargetSys) GetReplicationTargetClient(ctx context.Context, arn string) *miniogo.Core {
// GetRemoteTargetClient returns minio-go client for replication target instance
func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *miniogo.Core {
sys.RLock()
defer sys.RUnlock()
return sys.arnRemotesMap[arn]

@@ -70,6 +70,14 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
}, r.URL, guessIsBrowserReq(r))
return
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{
Code: "InvalidBucketState",
Description: "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusConflict,
}, r.URL, guessIsBrowserReq(r))
return
}

configData, err := xml.Marshal(v)
if err != nil {

@@ -20,6 +20,7 @@ import (
"crypto/x509"
"encoding/gob"
"errors"
"fmt"
"net"
"net/url"
"os"

@@ -42,15 +43,11 @@ func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)

gob.Register(StorageErr(""))
}

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
"Encryption support is requested but '%s' does not support encryption", name)
}

@@ -293,7 +290,7 @@ func logStartupMessage(msg string) {
logger.StartupMessage(msg)
}

func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, false, nil
}

@@ -302,11 +299,71 @@ func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn b
return nil, nil, false, err
}

c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
manager, err = certs.NewManager(GlobalContext, getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
if err != nil {
return nil, nil, false, err
}

// MinIO has support for multiple certificates. It expects the following structure:
// certs/
// │
// ├─ public.crt
// ├─ private.key
// │
// ├─ example.com/
// │ │
// │ ├─ public.crt
// │ └─ private.key
// └─ foobar.org/
// │
// ├─ public.crt
// └─ private.key
// ...
//
// Therefore, we read all filenames in the cert directory and check
// for each directory whether it contains a public.crt and private.key.
// If so, we try to add it to certificate manager.
root, err := os.Open(globalCertsDir.Get())
if err != nil {
return nil, nil, false, err
}
defer root.Close()

files, err := root.Readdir(-1)
if err != nil {
return nil, nil, false, err
}
for _, file := range files {
// Ignore all
// - regular files
// - "CAs" directory
// - any directory which starts with ".."
if file.Mode().IsRegular() || file.Name() == "CAs" || strings.HasPrefix(file.Name(), "..") {
continue
}
if file.Mode()&os.ModeSymlink == os.ModeSymlink {
file, err = os.Stat(filepath.Join(root.Name(), file.Name()))
if err != nil {
// not accessible ignore
continue
}
if !file.IsDir() {
continue
}
}

var (
certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
keyFile = filepath.Join(root.Name(), file.Name(), privateKeyFile)
)
if !isFile(certFile) || !isFile(keyFile) {
continue
}
if err = manager.AddCertificate(certFile, keyFile); err != nil {
err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
logger.LogIf(GlobalContext, err, logger.Minio)
}
}
secureConn = true
return x509Certs, c, secureConn, nil
return x509Certs, manager, secureConn, nil
}
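getTLSConfig now walks certs/ and loads one public.crt/private.key pair per domain directory. A compact sketch of the same discovery walk using the newer os.ReadDir and a hypothetical addCert callback; it omits the symlink handling the full version performs:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// discoverCertPairs finds <dir>/<name>/public.crt + private.key pairs,
// skipping plain files, the "CAs" directory and ".."-prefixed names,
// mirroring the directory layout documented above.
func discoverCertPairs(dir string, addCert func(cert, key string) error) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if !e.IsDir() || e.Name() == "CAs" || strings.HasPrefix(e.Name(), "..") {
			continue
		}
		cert := filepath.Join(dir, e.Name(), "public.crt")
		key := filepath.Join(dir, e.Name(), "private.key")
		if _, err := os.Stat(cert); err != nil {
			continue // no certificate in this directory
		}
		if _, err := os.Stat(key); err != nil {
			continue // certificate without key, skip
		}
		if err := addCert(cert, key); err != nil {
			fmt.Fprintf(os.Stderr, "skipping %s: %v\n", cert, err)
		}
	}
	return nil
}

func main() {
	_ = discoverCertPairs("/etc/minio/certs", func(c, k string) error {
		fmt.Println("loading", c, "+", k)
		return nil
	})
}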
@@ -46,7 +46,11 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
return buffer.Bytes(), nil
}

func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
type objectDeleter interface {
DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
}

func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) error {
_, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound

@@ -25,8 +25,9 @@ import (
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/crawler"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/config/etcd"
"github.com/minio/minio/cmd/config/etcd/dns"
xldap "github.com/minio/minio/cmd/config/identity/ldap"
"github.com/minio/minio/cmd/config/identity/openid"
"github.com/minio/minio/cmd/config/notify"

@@ -55,6 +56,7 @@ func initHelp() {
config.KmsKesSubSys: crypto.DefaultKesKVS,
config.LoggerWebhookSubSys: logger.DefaultKVS,
config.AuditWebhookSubSys: logger.DefaultAuditKVS,
config.CrawlerSubSys: crawler.DefaultKVS,
}
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v

@@ -106,6 +108,10 @@ func initHelp() {
Key: config.APISubSys,
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
Key: config.CrawlerSubSys,
Description: "manage continuous disk crawling for bucket disk usage, lifecycle, quota and data integrity checks",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
Description: "send server logs to webhook endpoints",

@@ -185,6 +191,7 @@ func initHelp() {
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
config.CrawlerSubSys: crawler.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,

@@ -246,6 +253,10 @@ func validateConfig(s config.Config, setDriveCount int) error {
return err
}

if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
return err
}

{
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {

@@ -306,8 +317,7 @@ func validateConfig(s config.Config, setDriveCount int) error {
return err
}

return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
globalNotificationSys.ConfiguredTargetIDs())
return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
}

func lookupConfigs(s config.Config, setDriveCount int) {

@@ -322,6 +332,19 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}

if dnsURL, dnsUser, dnsPass, ok := env.LookupEnv(config.EnvDNSWebhook); ok {
globalDNSConfig, err = dns.NewOperatorDNS(dnsURL,
dns.Authentication(dnsUser, dnsPass),
dns.RootCAs(globalRootCAs))
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to initialize remote webhook DNS config")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
}
}
}

etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
if globalIsGateway {

@@ -343,19 +366,25 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}

if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil && globalDNSConfig == nil {
globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
dns.DomainNames(globalDomainNames),
dns.DomainIPs(globalDomainIPs),
dns.DomainPort(globalMinioPort),
dns.CoreDNSPath(etcdCfg.CoreDNSPath),
)
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to initialize DNS config")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
globalDomainNames, err))
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
if globalDNSConfig != nil {
// if global DNS is already configured, indicate with a warning, incase
// users are confused.
logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, not using etcd for DNS store", globalDNSConfig))
} else {
globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
dns.DomainNames(globalDomainNames),
dns.DomainIPs(globalDomainIPs),
dns.DomainPort(globalMinioPort),
dns.CoreDNSPath(etcdCfg.CoreDNSPath),
)
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to initialize DNS config")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
globalDomainNames, err))
}
}
}
}

@@ -378,7 +407,12 @@ func lookupConfigs(s config.Config, setDriveCount int) {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}

globalAPIConfig.init(apiConfig)
globalAPIConfig.init(apiConfig, setDriveCount)

// Initialize remote instance transport once.
getRemoteInstanceTransportOnce.Do(func() {
getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
})

if globalIsErasure {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)

@@ -404,6 +438,10 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}
}
globalCrawlerConfig, err = crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to read crawler config: %w", err))
}

kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {

@@ -417,6 +455,9 @@ func lookupConfigs(s config.Config, setDriveCount int) {

// Enable auto-encryption if enabled
globalAutoEncryption = kmsCfg.AutoEncryption
if globalAutoEncryption && !globalIsGateway {
logger.LogIf(ctx, fmt.Errorf("%s env is deprecated please migrate to using `mc encrypt` at bucket level", crypto.EnvKMSAutoEncryption))
}

globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {

@@ -452,11 +493,13 @@ func lookupConfigs(s config.Config, setDriveCount int) {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
}

for _, l := range loggerCfg.HTTP {
for k, l := range loggerCfg.HTTP {
if l.Enabled {
// Enable http logging
if err = logger.AddTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.New(
http.WithTargetName(k),
http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),

@@ -468,11 +511,13 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}

for _, l := range loggerCfg.Audit {
for k, l := range loggerCfg.Audit {
if l.Enabled {
// Enable http audit logging
if err = logger.AddAuditTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.New(
http.WithTargetName(k),
http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),

@@ -484,12 +529,12 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}

globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(), false)
globalConfigTargetList, err = notify.GetNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), false)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}

globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport(), true)
globalEnvTargetList, err = notify.GetNotificationTargets(GlobalContext, newServerConfig(), NewGatewayHTTPTransport(), true)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}

@@ -29,15 +29,23 @@ import (

// API sub-system constants
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiReadyDeadline = "ready_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiClusterDeadline = "cluster_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRemoteTransportDeadline = "remote_transport_deadline"

EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIClusterDeadline = "MINIO_API_CLUSTER_DEADLINE"
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
)

// Deprecated key and ENVs
const (
apiReadyDeadline = "ready_deadline"
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
)

// DefaultKVS - default storage class config

@@ -52,22 +60,27 @@ var (
Value: "10s",
},
config.KV{
Key: apiReadyDeadline,
Key: apiClusterDeadline,
Value: "10s",
},
config.KV{
Key: apiCorsAllowOrigin,
Value: "*",
},
config.KV{
Key: apiRemoteTransportDeadline,
Value: "2h",
},
}
)

// Config storage class configuration
type Config struct {
APIRequestsMax int `json:"requests_max"`
APIRequestsDeadline time.Duration `json:"requests_deadline"`
APIReadyDeadline time.Duration `json:"ready_deadline"`
APICorsAllowOrigin []string `json:"cors_allow_origin"`
RequestsMax int `json:"requests_max"`
RequestsDeadline time.Duration `json:"requests_deadline"`
ClusterDeadline time.Duration `json:"cluster_deadline"`
CorsAllowOrigin []string `json:"cors_allow_origin"`
RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
}

// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.

@@ -83,6 +96,9 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {

// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
// remove this since we have removed this already.
kvs.Delete(apiReadyDeadline)

if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}

@@ -102,16 +118,23 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
return cfg, err
}

readyDeadline, err := time.ParseDuration(env.Get(EnvAPIReadyDeadline, kvs.Get(apiReadyDeadline)))
clusterDeadline, err := time.ParseDuration(env.Get(EnvAPIClusterDeadline, kvs.Get(apiClusterDeadline)))
if err != nil {
return cfg, err
}

corsAllowOrigin := strings.Split(env.Get(EnvAPICorsAllowOrigin, kvs.Get(apiCorsAllowOrigin)), ",")

remoteTransportDeadline, err := time.ParseDuration(env.Get(EnvAPIRemoteTransportDeadline, kvs.Get(apiRemoteTransportDeadline)))
if err != nil {
return cfg, err
}

return Config{
APIRequestsMax: requestsMax,
APIRequestsDeadline: requestsDeadline,
APIReadyDeadline: readyDeadline,
APICorsAllowOrigin: corsAllowOrigin,
RequestsMax: requestsMax,
RequestsDeadline: requestsDeadline,
ClusterDeadline: clusterDeadline,
CorsAllowOrigin: corsAllowOrigin,
RemoteTransportDeadline: remoteTransportDeadline,
}, nil
}

@@ -39,5 +39,11 @@ var (
Optional: true,
Type: "csv",
},
config.HelpKV{
Key: apiRemoteTransportDeadline,
Description: `set the deadline for API requests on remote transports while proxying between federated instances e.g. "2h"`,
Optional: true,
Type: "duration",
},
}
)

@@ -76,6 +76,7 @@ const (
KmsKesSubSys = "kms_kes"
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
CrawlerSubSys = "crawler"

// Add new constants here if you add new fields to config.
)

@@ -112,6 +113,7 @@ var SubSystems = set.CreateStringSet([]string{
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
CrawlerSubSys,
NotifyAMQPSubSys,
NotifyESSubSys,
NotifyKafkaSubSys,

@@ -138,6 +140,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
CrawlerSubSys,
}...)

// Constant separators

@@ -264,6 +267,16 @@ func (kvs KVS) Get(key string) string {
return ""
}

// Delete - deletes the key if present from the KV list.
func (kvs *KVS) Delete(key string) {
for i, kv := range *kvs {
if kv.Key == key {
*kvs = append((*kvs)[:i], (*kvs)[i+1:]...)
return
}
}
}
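KVS.Delete lets lookup code drop deprecated keys (as LookupConfig does with ready_deadline above) before validating what remains. A self-contained miniature of the same slice-delete, with illustrative types:

package main

import "fmt"

type kv struct{ Key, Value string }
type kvs []kv

// Delete removes the first entry with the given key, mirroring KVS.Delete above.
func (k *kvs) Delete(key string) {
	for i, e := range *k {
		if e.Key == key {
			*k = append((*k)[:i], (*k)[i+1:]...)
			return
		}
	}
}

func main() {
	cfg := kvs{{"requests_max", "100"}, {"ready_deadline", "10s"}}
	cfg.Delete("ready_deadline") // deprecated key is dropped before validation
	fmt.Println(cfg)
}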
// Lookup - lookup a key in a list of KVS
func (kvs KVS) Lookup(key string) (string, bool) {
for _, kv := range kvs {

@@ -448,6 +461,11 @@ func (c Config) Merge() Config {
ckvs.Set(kv.Key, kv.Value)
}
}
if _, ok := cp[subSys]; !ok {
// A config subsystem was removed or server was downgraded.
Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
continue
}
cp[subSys][tgt] = ckvs
}
}

@@ -33,6 +33,7 @@ const (
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvFSOSync = "MINIO_FS_OSYNC"
EnvArgs = "MINIO_ARGS"
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"

EnvUpdate = "MINIO_UPDATE"

cmd/config/crawler/crawler.go (new file, 67 lines)
@@ -0,0 +1,67 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package crawler

import (
"errors"

"github.com/minio/minio/cmd/config"
)

// Crawler configuration keys
const (
BitrotScan = "bitrotscan"
)

// Config represents the crawler settings.
type Config struct {
// Bitrot will perform bitrot scan on local disk when checking objects.
Bitrot bool `json:"bitrotscan"`
}

var (
// DefaultKVS - default KV config for crawler settings
DefaultKVS = config.KVS{
config.KV{
Key: BitrotScan,
Value: config.EnableOff,
},
}

// Help provides help for config values
Help = config.HelpKVS{
config.HelpKV{
Key: BitrotScan,
Description: `perform bitrot scan on disks when checking objects during crawl`,
Optional: true,
Type: "on|off",
},
}
)

// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
bitrot := kvs.Get(BitrotScan)
if bitrot != config.EnableOn && bitrot != config.EnableOff {
return cfg, errors.New(BitrotScan + ": must be 'on' or 'off'")
}
cfg.Bitrot = bitrot == config.EnableOn
return cfg, nil
}
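LookupConfig above accepts only the literal strings "on" and "off" for bitrotscan and rejects anything else rather than silently defaulting. A tiny self-contained version of that validation:

package main

import (
	"errors"
	"fmt"
)

// parseOnOff mirrors the bitrotscan validation above: anything other
// than a literal "on" or "off" is an error.
func parseOnOff(key, v string) (bool, error) {
	switch v {
	case "on":
		return true, nil
	case "off":
		return false, nil
	}
	return false, errors.New(key + ": must be 'on' or 'off'")
}

func main() {
	for _, v := range []string{"on", "off", "ON"} {
		b, err := parseOnOff("bitrotscan", v)
		fmt.Println(v, b, err)
	}
}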
@@ -26,9 +26,8 @@ import (
"strings"
"time"

"github.com/minio/minio-go/v7/pkg/set"

"github.com/coredns/coredns/plugin/etcd/msg"
"github.com/minio/minio-go/v7/pkg/set"
"go.etcd.io/etcd/v3/clientv3"
)

@@ -214,6 +213,11 @@ func (c *CoreDNS) DeleteRecord(record SrvRecord) error {
return nil
}

// String stringer name for this implementation of dns.Store
func (c *CoreDNS) String() string {
return "etcdDNS"
}

// CoreDNS - represents dns config for coredns server.
type CoreDNS struct {
domainNames []string

@@ -223,13 +227,13 @@ type CoreDNS struct {
etcdClient *clientv3.Client
}

// Option - functional options pattern style
type Option func(*CoreDNS)
// EtcdOption - functional options pattern style
type EtcdOption func(*CoreDNS)

// DomainNames set a list of domain names used by this CoreDNS
// client setting, note this will fail if set to empty when
// constructor initializes.
func DomainNames(domainNames []string) Option {
func DomainNames(domainNames []string) EtcdOption {
return func(args *CoreDNS) {
args.domainNames = domainNames
}

@@ -237,14 +241,14 @@ func DomainNames(domainNames []string) EtcdOption {

// DomainIPs set a list of custom domain IPs, note this will
// fail if set to empty when constructor initializes.
func DomainIPs(domainIPs set.StringSet) Option {
func DomainIPs(domainIPs set.StringSet) EtcdOption {
return func(args *CoreDNS) {
args.domainIPs = domainIPs
}
}

// DomainPort - is a string version of server port
func DomainPort(domainPort string) Option {
func DomainPort(domainPort string) EtcdOption {
return func(args *CoreDNS) {
args.domainPort = domainPort
}

@@ -253,14 +257,14 @@ func DomainPort(domainPort string) EtcdOption {
// CoreDNSPath - custom prefix on etcd to populate DNS
// service records, optional and can be empty.
// if empty then c.prefixPath is used i.e "/skydns"
func CoreDNSPath(prefix string) Option {
func CoreDNSPath(prefix string) EtcdOption {
return func(args *CoreDNS) {
args.prefixPath = prefix
}
}

// NewCoreDNS - initialize a new coreDNS set/unset values.
func NewCoreDNS(cfg clientv3.Config, setters ...Option) (Store, error) {
func NewCoreDNS(cfg clientv3.Config, setters ...EtcdOption) (Store, error) {
etcdClient, err := clientv3.New(cfg)
if err != nil {
return nil, err
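Renaming Option to EtcdOption keeps the functional-options constructor style while freeing the generic name for other DNS stores in the same package. The pattern in miniature, with illustrative types:

package main

import "fmt"

type server struct {
	port int
	path string
}

// option mutates a server under construction, like EtcdOption above.
type option func(*server)

func withPort(p int) option    { return func(s *server) { s.port = p } }
func withPath(p string) option { return func(s *server) { s.path = p } }

// newServer applies each setter over sane defaults, so callers only
// name the settings they want to change.
func newServer(opts ...option) *server {
	s := &server{port: 2379, path: "/skydns"}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", *newServer(withPort(9000), withPath("/dns")))
}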
cmd/config/dns/operator_dns.go (new file, 234 lines)
@@ -0,0 +1,234 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package dns

import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"

"github.com/dgrijalva/jwt-go"
"github.com/minio/minio/cmd/config"
xhttp "github.com/minio/minio/cmd/http"
)

var (
defaultOperatorContextTimeout = 10 * time.Second
// ErrNotImplemented - Indicates the functionality which is not implemented
ErrNotImplemented = errors.New("The method is not implemented")
)

func (c *OperatorDNS) addAuthHeader(r *http.Request) error {
if c.username == "" || c.password == "" {
return nil
}

claims := &jwt.StandardClaims{
ExpiresAt: int64(15 * time.Minute),
Issuer: c.username,
Subject: config.EnvDNSWebhook,
}

token := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)
ss, err := token.SignedString([]byte(c.password))
if err != nil {
return err
}

r.Header.Set("Authorization", "Bearer "+ss)
return nil
}
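addAuthHeader above signs a bearer token with HS512 using the shared webhook password. For reference, a self-contained sketch with the same github.com/dgrijalva/jwt-go calls; note that jwt-go defines StandardClaims.ExpiresAt as an absolute Unix timestamp, so this sketch derives it from time.Now():

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/dgrijalva/jwt-go"
)

// signAuthHeader attaches a short-lived HS512 bearer token, in the
// spirit of addAuthHeader above. ExpiresAt is an absolute Unix time
// per jwt-go's StandardClaims definition.
func signAuthHeader(r *http.Request, user, password string) error {
	claims := &jwt.StandardClaims{
		ExpiresAt: time.Now().Add(15 * time.Minute).Unix(),
		Issuer:    user,
		Subject:   "MINIO_DNS_WEBHOOK_ENDPOINT",
	}
	tok := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)
	ss, err := tok.SignedString([]byte(password))
	if err != nil {
		return err
	}
	r.Header.Set("Authorization", "Bearer "+ss)
	return nil
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "http://operator.local/webhook?bucket=b", nil)
	if err := signAuthHeader(req, "admin", "secret"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(req.Header.Get("Authorization") != "")
}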
|
||||
func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) {
    u, err := url.Parse(c.Endpoint)
    if err != nil {
        return "", err
    }
    q := u.Query()
    q.Add("bucket", bucket)
    q.Add("delete", strconv.FormatBool(delete))
    u.RawQuery = q.Encode()
    return u.String(), nil
}
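For reference, a quick standalone sketch of what `endpoint()` produces (the base URL is hypothetical):

```go
package main

import (
    "fmt"
    "net/url"
    "strconv"
)

func main() {
    // Same steps as endpoint(): parse the base URL, then attach the
    // bucket name and the delete flag as query parameters.
    u, err := url.Parse("https://operator.example.net:4222/webhook") // hypothetical endpoint
    if err != nil {
        panic(err)
    }
    q := u.Query()
    q.Add("bucket", "images")
    q.Add("delete", strconv.FormatBool(false))
    u.RawQuery = q.Encode()
    fmt.Println(u) // https://operator.example.net:4222/webhook?bucket=images&delete=false
}
```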
// Put - Adds DNS entries into operator webhook server
func (c *OperatorDNS) Put(bucket string) error {
    ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
    defer cancel()
    e, err := c.endpoint(bucket, false)
    if err != nil {
        return newError(bucket, err)
    }
    req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
    if err != nil {
        return newError(bucket, err)
    }
    if err = c.addAuthHeader(req); err != nil {
        return newError(bucket, err)
    }
    resp, err := c.httpClient.Do(req)
    if err != nil {
        if derr := c.Delete(bucket); derr != nil {
            return newError(bucket, derr)
        }
    }
    var errorStringBuilder strings.Builder
    io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
    xhttp.DrainBody(resp.Body)
    if resp.StatusCode != http.StatusOK {
        errorString := errorStringBuilder.String()
        return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))
    }
    return nil
}
func newError(bucket string, err error) error {
    e := Error{bucket, err}
    if strings.Contains(err.Error(), "invalid bucket name") {
        return ErrInvalidBucketName(e)
    }
    return e
}

// Delete - Removes DNS entries added in Put().
func (c *OperatorDNS) Delete(bucket string) error {
    ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
    defer cancel()
    e, err := c.endpoint(bucket, true)
    if err != nil {
        return err
    }
    req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
    if err != nil {
        return err
    }
    if err = c.addAuthHeader(req); err != nil {
        return err
    }
    resp, err := c.httpClient.Do(req)
    if err != nil {
        return err
    }
    xhttp.DrainBody(resp.Body)
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("request to delete the service for bucket %s, failed with status %s", bucket, resp.Status)
    }
    return nil
}

// DeleteRecord - Removes a specific DNS entry.
// No-op for Operator because the operator deals only with bucket entries.
func (c *OperatorDNS) DeleteRecord(record SrvRecord) error {
    return ErrNotImplemented
}

// Close closes the internal http client
func (c *OperatorDNS) Close() error {
    c.httpClient.CloseIdleConnections()
    return nil
}

// List - Retrieves list of DNS entries for the domain.
// This is a no-op for Operator because there is no intent to enforce a global
// namespace at the MinIO level with this DNS entry. The global namespace is
// enforced by the Kubernetes Operator.
func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {
    return nil, ErrNotImplemented
}

// Get - Retrieves DNS records for a bucket.
// This is a no-op for Operator because there is no intent to enforce a global
// namespace at the MinIO level with this DNS entry. The global namespace is
// enforced by the Kubernetes Operator.
func (c *OperatorDNS) Get(bucket string) (srvRecords []SrvRecord, err error) {
    return nil, ErrNotImplemented
}

// String - stringer name for this implementation of dns.Store
func (c *OperatorDNS) String() string {
    return "webhookDNS"
}

// OperatorDNS - represents dns config for MinIO k8s operator.
type OperatorDNS struct {
    httpClient *http.Client
    Endpoint   string
    rootCAs    *x509.CertPool
    username   string
    password   string
}

// OperatorOption - functional options pattern style for OperatorDNS
type OperatorOption func(*OperatorDNS)

// Authentication - custom username and password for authenticating at the endpoint
func Authentication(username, password string) OperatorOption {
    return func(args *OperatorDNS) {
        args.username = username
        args.password = password
    }
}

// RootCAs - add custom trust certs pool
func RootCAs(CAs *x509.CertPool) OperatorOption {
    return func(args *OperatorDNS) {
        args.rootCAs = CAs
    }
}

// NewOperatorDNS - initialize a new K8S Operator DNS set/unset values.
func NewOperatorDNS(endpoint string, setters ...OperatorOption) (Store, error) {
    if endpoint == "" {
        return nil, errors.New("invalid argument")
    }

    args := &OperatorDNS{
        Endpoint: endpoint,
    }
    for _, setter := range setters {
        setter(args)
    }
    args.httpClient = &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyFromEnvironment,
            DialContext: (&net.Dialer{
                Timeout:   3 * time.Second,
                KeepAlive: 5 * time.Second,
            }).DialContext,
            ResponseHeaderTimeout: 3 * time.Second,
            TLSHandshakeTimeout:   3 * time.Second,
            ExpectContinueTimeout: 3 * time.Second,
            TLSClientConfig: &tls.Config{
                RootCAs: args.rootCAs,
            },
            // Go net/http automatically unzips if content-type is
            // gzip; disable this feature, as we are always interested
            // in the raw stream.
            DisableCompression: true,
        },
    }
    return args, nil
}
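Putting the constructor and its options together, a hedged usage sketch (endpoint and credentials are illustrative, not values from this change):

```go
package main

import (
    "log"

    "github.com/minio/minio/cmd/config/dns"
)

func main() {
    // Build the operator-webhook store with functional options, then
    // register and deregister a bucket through it.
    store, err := dns.NewOperatorDNS("https://operator.example.net:4222/webhook", // hypothetical
        dns.Authentication("minio", "minio123"),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer store.Close()

    if err := store.Put("images"); err != nil {
        log.Fatal(err)
    }
    if err := store.Delete("images"); err != nil {
        log.Fatal(err)
    }
}
```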
44    cmd/config/dns/store.go    Normal file
@@ -0,0 +1,44 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package dns

// Error - DNS related error.
type Error struct {
    Bucket string
    Err    error
}

// ErrInvalidBucketName for buckets with invalid name
type ErrInvalidBucketName Error

func (e ErrInvalidBucketName) Error() string {
    return "invalid bucket name error: " + e.Err.Error()
}

func (e Error) Error() string {
    return "dns related error: " + e.Err.Error()
}

// Store dns record store
type Store interface {
    Put(bucket string) error
    Get(bucket string) ([]SrvRecord, error)
    Delete(bucket string) error
    List() (map[string][]SrvRecord, error)
    DeleteRecord(record SrvRecord) error
    Close() error
    String() string
}
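A small compile-time check, usable inside package `dns`, documents that both backends are expected to satisfy this contract (a common Go idiom, not part of the diff itself):

```go
// Compile-time assertions that both implementations satisfy Store.
var (
    _ Store = (*CoreDNS)(nil)     // etcd-backed SkyDNS records
    _ Store = (*OperatorDNS)(nil) // k8s operator webhook
)
```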
@@ -19,7 +19,6 @@ package config

import (
    "errors"
    "fmt"
    "io"
    "net"
    "syscall"

@@ -111,18 +110,16 @@ func ErrorToErr(err error) Err {
        case *net.OpError:
            return ErrPortAccess(err).Msg("Insufficient permissions to use specified port")
        }
        return ErrNoPermissionsToAccessDirFiles(err).Msg("Insufficient permissions to access path")
    } else if errors.Is(err, io.ErrUnexpectedEOF) {
        return ErrUnexpectedDataContent(err)
    } else {
        // Failed to identify what type of error this is; return a simple UI error
        return Err{msg: err.Error()}
    }

    // Failed to identify what type of error this is; return a simple UI error
    return Err{msg: err.Error()}
}

// FmtError converts a fatal error message to a clearer error
// using some colors
func FmtError(introMsg string, err error, jsonFlag bool) string {
    renderedTxt := ""
    uiErr := ErrorToErr(err)
    // JSON print
@@ -205,12 +205,6 @@ Example 1:
    `Use 'sudo setcap cap_net_bind_service=+ep /path/to/minio' to provide sufficient permissions`,
)

ErrNoPermissionsToAccessDirFiles = newErrFn(
    "Missing permissions to access the specified path",
    "Please ensure the specified path can be accessed",
    "",
)

ErrSSLUnexpectedError = newErrFn(
    "Invalid TLS certificate",
    "Please check the content of your certificate data",

@@ -247,12 +241,6 @@ Example 1:
    "",
)

ErrUnexpectedDataContent = newErrFn(
    "Unexpected data content",
    "Please contact MinIO at https://slack.min.io",
    "",
)

ErrUnexpectedError = newErrFn(
    "Unexpected error",
    "Please contact MinIO at https://slack.min.io",
@@ -25,6 +25,7 @@ import (
    "net/http"
    "strconv"
    "strings"
    "sync"
    "time"

    jwtgo "github.com/dgrijalva/jwt-go"

@@ -49,10 +50,14 @@ type Config struct {
    publicKeys  map[string]crypto.PublicKey
    transport   *http.Transport
    closeRespFn func(io.ReadCloser)
    mutex       *sync.Mutex
}

// PopulatePublicKey - populates a new publickey from the JWKS URL.
func (r *Config) PopulatePublicKey() error {
    r.mutex.Lock()
    defer r.mutex.Unlock()

    if r.JWKS.URL == nil || r.JWKS.URL.String() == "" {
        return nil
    }

@@ -185,7 +190,15 @@ func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
    var claims jwtgo.MapClaims
    jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback)
    if err != nil {
        return nil, err
        // Re-populate the public key in case the JWKS
        // pubkeys are refreshed
        if err = p.PopulatePublicKey(); err != nil {
            return nil, err
        }
        jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback)
        if err != nil {
            return nil, err
        }
    }

    if !jwtToken.Valid {

@@ -317,6 +330,7 @@ func LookupConfig(kvs config.KVS, transport *http.Transport, closeRespFn func(io
    ClientID:    env.Get(EnvIdentityOpenIDClientID, kvs.Get(ClientID)),
    transport:   transport,
    closeRespFn: closeRespFn,
    mutex:       &sync.Mutex{}, // allocate for copying
}

configURL := env.Get(EnvIdentityOpenIDURL, kvs.Get(ConfigURL))
@@ -20,6 +20,7 @@ import (
    "crypto"
    "encoding/json"
    "net/url"
    "sync"
    "testing"
    "time"

@@ -89,6 +90,7 @@ func TestJWTAzureFail(t *testing.T) {
    }

    cfg := Config{}
    cfg.mutex = &sync.Mutex{}
    cfg.JWKS.URL = u1
    cfg.publicKeys = keys
    jwt := NewJWT(cfg)

@@ -136,6 +138,7 @@ func TestJWT(t *testing.T) {
    }

    cfg := Config{}
    cfg.mutex = &sync.Mutex{}
    cfg.JWKS.URL = u1
    cfg.publicKeys = keys
    jwt := NewJWT(cfg)
@@ -14,14 +14,14 @@
 * limitations under the License.
 */

package dns
package config

// Store dns record store
type Store interface {
    Put(bucket string) error
    Get(bucket string) ([]SrvRecord, error)
    Delete(bucket string) error
    List() (map[string][]SrvRecord, error)
    DeleteRecord(record SrvRecord) error
    Close() error
import "context"

// Logger contains injected logger methods.
var Logger = struct {
    Info  func(msg string, data ...interface{})
    LogIf func(ctx context.Context, err error, errKind ...interface{})
}{
    // Initialized via injection.
}
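The `Logger` variable above is a struct of function fields that the importing package fills in at startup, which breaks what would otherwise be an import cycle with the real logger. A standalone sketch of the pattern:

```go
package main

import (
    "context"
    "fmt"
)

// Same shape as config.Logger: function fields, injected at startup.
var Logger = struct {
    Info  func(msg string, data ...interface{})
    LogIf func(ctx context.Context, err error, errKind ...interface{})
}{}

func main() {
    // The wiring the main package performs once, during initialization.
    Logger.Info = func(msg string, data ...interface{}) {
        fmt.Printf(msg+"\n", data...)
    }
    Logger.Info("injected logger ready: %s", "ok")
}
```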
@@ -340,6 +340,12 @@ var (
        Optional: true,
        Type:     "sentence",
    },
    config.HelpKV{
        Key:         target.PostgresMaxOpenConnections,
        Description: "To set the maximum number of open connections to the database. The value is set to `2` by default.",
        Optional:    true,
        Type:        "number",
    },
}

HelpMySQL = config.HelpKVS{

@@ -377,6 +383,12 @@ var (
        Optional: true,
        Type:     "sentence",
    },
    config.HelpKV{
        Key:         target.MySQLMaxOpenConnections,
        Description: "To set the maximum number of open connections to the database. The value is set to `2` by default.",
        Optional:    true,
        Type:        "number",
    },
}

HelpNATS = config.HelpKVS{
@@ -357,6 +357,10 @@ func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArg
        Key:   target.PostgresQueueLimit,
        Value: strconv.Itoa(int(cfg.QueueLimit)),
    },
    config.KV{
        Key:   target.PostgresMaxOpenConnections,
        Value: strconv.Itoa(cfg.MaxOpenConnections),
    },
}

return nil

@@ -554,6 +558,10 @@ func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error
        Key:   target.MySQLQueueLimit,
        Value: strconv.Itoa(int(cfg.QueueLimit)),
    },
    config.KV{
        Key:   target.MySQLMaxOpenConnections,
        Value: strconv.Itoa(cfg.MaxOpenConnections),
    },
}

return nil
@@ -43,11 +43,10 @@ var ErrTargetsOffline = errors.New("one or more targets are offline. Please use

// TestNotificationTargets is similar to GetNotificationTargets()
// but avoids explicit registration.
func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport,
    targetIDs []event.TargetID) error {
func TestNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, targetIDs []event.TargetID) error {
    test := true
    returnOnTargetError := true
    targets, err := RegisterNotificationTargets(cfg, doneCh, transport, targetIDs, test, returnOnTargetError)
    targets, err := RegisterNotificationTargets(ctx, cfg, transport, targetIDs, test, returnOnTargetError)
    if err == nil {
        // Close all targets since we are only testing connections.
        for _, t := range targets.TargetMap() {

@@ -60,9 +59,9 @@ func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transpor

// GetNotificationTargets registers and initializes all notification
// targets, returns error if any.
func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool) (*event.TargetList, error) {
func GetNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, test bool) (*event.TargetList, error) {
    returnOnTargetError := false
    return RegisterNotificationTargets(cfg, doneCh, transport, nil, test, returnOnTargetError)
    return RegisterNotificationTargets(ctx, cfg, transport, nil, test, returnOnTargetError)
}

// RegisterNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
@@ -70,8 +69,8 @@ func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport
// * Add a new target in pkg/event/target package.
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
// * Handle the configuration in this function to create/add into TargetList.
func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
    targetList, err := FetchRegisteredTargets(cfg, doneCh, transport, test, returnOnTargetError)
func RegisterNotificationTargets(ctx context.Context, cfg config.Config, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
    targetList, err := FetchRegisteredTargets(ctx, cfg, transport, test, returnOnTargetError)
    if err != nil {
        return targetList, err
    }

@@ -94,7 +93,7 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
// FetchRegisteredTargets - Returns a set of configured TargetList
// If `returnOnTargetError` is set to true, the function returns when a target initialization fails
// Else, the function will return a complete TargetList irrespective of errors
func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
func FetchRegisteredTargets(ctx context.Context, cfg config.Config, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
    targetList := event.NewTargetList()
    var targetsOffline bool
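The mechanical part of this refactor is that `ctx.Done()` returns exactly the `<-chan struct{}` the targets used to receive as `doneCh`, so cancellation now flows from a single context instead of a hand-rolled channel. A minimal illustration:

```go
package main

import (
    "context"
    "fmt"
    "time"
)

// worker has the old-style signature that took a doneCh.
func worker(doneCh <-chan struct{}) {
    <-doneCh
    fmt.Println("target shut down")
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    go worker(ctx.Done()) // ctx.Done() is a drop-in doneCh
    cancel()
    time.Sleep(50 * time.Millisecond) // give the goroutine time to print
}
```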
@@ -167,7 +166,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewAMQPTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewAMQPTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -188,7 +187,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewElasticsearchTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewElasticsearchTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -209,7 +208,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
        continue
    }
    args.TLS.RootCAs = transport.TLSClientConfig.RootCAs
    newTarget, err := target.NewKafkaTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewKafkaTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -230,7 +229,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
        continue
    }
    args.RootCAs = transport.TLSClientConfig.RootCAs
    newTarget, err := target.NewMQTTTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewMQTTTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -250,7 +249,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewMySQLTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewMySQLTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -270,7 +269,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewNATSTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewNATSTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -290,7 +289,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewNSQTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewNSQTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -310,7 +309,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewPostgreSQLTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewPostgreSQLTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -330,7 +329,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewRedisTarget(id, args, doneCh, logger.LogOnceIf, test)
    newTarget, err := target.NewRedisTarget(id, args, ctx.Done(), logger.LogOnceIf, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {

@@ -350,7 +349,7 @@ func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport
    if !args.Enable {
        continue
    }
    newTarget, err := target.NewWebhookTarget(id, args, doneCh, logger.LogOnceIf, transport, test)
    newTarget, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport, test)
    if err != nil {
        targetsOffline = true
        if returnOnTargetError {
@@ -808,6 +807,10 @@ var (
        Key:   target.MySQLQueueLimit,
        Value: "0",
    },
    config.KV{
        Key:   target.MySQLMaxOpenConnections,
        Value: "2",
    },
}
)

@@ -856,13 +859,25 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
    if k != config.Default {
        queueDirEnv = queueDirEnv + config.Default + k
    }

    maxOpenConnectionsEnv := target.EnvMySQLMaxOpenConnections
    if k != config.Default {
        maxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k
    }

    maxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.MySQLMaxOpenConnections)))
    if cErr != nil {
        return nil, cErr
    }

    mysqlArgs := target.MySQLArgs{
        Enable:     enabled,
        Format:     env.Get(formatEnv, kv.Get(target.MySQLFormat)),
        DSN:        env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
        Table:      env.Get(tableEnv, kv.Get(target.MySQLTable)),
        QueueDir:   env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
        QueueLimit: queueLimit,
        Enable:             enabled,
        Format:             env.Get(formatEnv, kv.Get(target.MySQLFormat)),
        DSN:                env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
        Table:              env.Get(tableEnv, kv.Get(target.MySQLTable)),
        QueueDir:           env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
        QueueLimit:         queueLimit,
        MaxOpenConnections: maxOpenConnections,
    }
    if err = mysqlArgs.Validate(); err != nil {
        return nil, err
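The lookup above follows MinIO's usual precedence: an environment variable (suffixed with the target name for non-default targets) overrides the stored KV, and the value must parse as an integer. A hedged sketch of that precedence using plain `os.Getenv` (the real env name comes from `target.EnvMySQLMaxOpenConnections`; the one below is illustrative):

```go
package main

import (
    "fmt"
    "os"
    "strconv"
)

func main() {
    os.Setenv("MINIO_NOTIFY_MYSQL_MAX_OPEN_CONNECTIONS", "8") // illustrative name

    // Env overrides the config KV; the config KV defaults to "2".
    v := os.Getenv("MINIO_NOTIFY_MYSQL_MAX_OPEN_CONNECTIONS")
    if v == "" {
        v = "2"
    }
    maxOpen, err := strconv.Atoi(v)
    if err != nil {
        panic(err) // the real code returns cErr to the caller
    }
    fmt.Println(maxOpen) // 8
}
```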
@@ -1236,6 +1251,10 @@ var (
        Key:   target.PostgresQueueLimit,
        Value: "0",
    },
    config.KV{
        Key:   target.PostgresMaxOpenConnections,
        Value: "2",
    },
}
)

@@ -1286,13 +1305,24 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
        queueDirEnv = queueDirEnv + config.Default + k
    }

    maxOpenConnectionsEnv := target.EnvPostgresMaxOpenConnections
    if k != config.Default {
        maxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k
    }

    maxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.PostgresMaxOpenConnections)))
    if cErr != nil {
        return nil, cErr
    }

    psqlArgs := target.PostgreSQLArgs{
        Enable:           enabled,
        Format:           env.Get(formatEnv, kv.Get(target.PostgresFormat)),
        ConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
        Table:            env.Get(tableEnv, kv.Get(target.PostgresTable)),
        QueueDir:         env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
        QueueLimit:       uint64(queueLimit),
        Enable:             enabled,
        Format:             env.Get(formatEnv, kv.Get(target.PostgresFormat)),
        ConnectionString:   env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
        Table:              env.Get(tableEnv, kv.Get(target.PostgresTable)),
        QueueDir:           env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
        QueueLimit:         uint64(queueLimit),
        MaxOpenConnections: maxOpenConnections,
    }
    if err = psqlArgs.Validate(); err != nil {
        return nil, err
@@ -156,7 +156,7 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
}

// Validates the parity disks.
func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
    if ssParity == 0 && rrsParity == 0 {
        return nil
    }

@@ -174,12 +174,12 @@ func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
    return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
}

if ssParity > drivesPerSet/2 {
    return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2)
if ssParity > setDriveCount/2 {
    return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
}

if rrsParity > drivesPerSet/2 {
    return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2)
if rrsParity > setDriveCount/2 {
    return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
}

if ssParity > 0 && rrsParity > 0 {
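Worked numbers for the bounds above, matching the test table later in this diff: with a 16-drive erasure set, each storage class may use at most `setDriveCount/2 = 8` parity drives.

```go
package main

import "fmt"

func main() {
    setDriveCount := 16
    ssParity, rrsParity := 4, 2

    fmt.Println(ssParity <= setDriveCount/2)  // true: standard class within bounds
    fmt.Println(rrsParity <= setDriveCount/2) // true: reduced redundancy within bounds
    fmt.Println(9 <= setDriveCount/2)         // false: 9 parity drives would be rejected
}
```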
@@ -220,9 +220,9 @@ func Enabled(kvs config.KVS) bool {
}

// LookupConfig - lookup storage class config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
    cfg = Config{}
    cfg.Standard.Parity = drivesPerSet / 2
    cfg.Standard.Parity = setDriveCount / 2
    cfg.RRS.Parity = defaultRRSParity

    if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {

@@ -239,7 +239,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
    }
}
if cfg.Standard.Parity == 0 {
    cfg.Standard.Parity = drivesPerSet / 2
    cfg.Standard.Parity = setDriveCount / 2
}

if rrsc != "" {

@@ -254,7 +254,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {

// Validation is done after parsing both the storage classes. This is needed because we need one
// storage class value to deduce the correct value of the other storage class.
if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil {
if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
    return Config{}, err
}
@@ -69,10 +69,10 @@ func TestParseStorageClass(t *testing.T) {

func TestValidateParity(t *testing.T) {
    tests := []struct {
        rrsParity    int
        ssParity     int
        success      bool
        drivesPerSet int
        rrsParity     int
        ssParity      int
        success       bool
        setDriveCount int
    }{
        {2, 4, true, 16},
        {3, 3, true, 16},

@@ -85,7 +85,7 @@ func TestValidateParity(t *testing.T) {
        {9, 2, false, 16},
    }
    for i, tt := range tests {
        err := validateParity(tt.ssParity, tt.rrsParity, tt.drivesPerSet)
        err := validateParity(tt.ssParity, tt.rrsParity, tt.setDriveCount)
        if err != nil && tt.success {
            t.Errorf("Test %d, Expected success, got %s", i+1, err)
        }
@@ -122,6 +122,34 @@ func (sys *HTTPConsoleLoggerSys) Validate() error {
    return nil
}

// Endpoint - dummy function for interface compatibility
func (sys *HTTPConsoleLoggerSys) Endpoint() string {
    return sys.console.Endpoint()
}

// String - stringer function for interface compatibility
func (sys *HTTPConsoleLoggerSys) String() string {
    return "console+http"
}

// Content returns the console stdout log
func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
    sys.RLock()
    sys.logBuf.Do(func(p interface{}) {
        if p != nil {
            lg, ok := p.(log.Info)
            if ok {
                if (lg.Entry != log.Entry{}) {
                    logs = append(logs, lg.Entry)
                }
            }
        }
    })
    sys.RUnlock()

    return
}

// Send log message 'e' to console and publish to console
// log pubsub system
func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
@@ -16,11 +16,14 @@ package crypto

import (
    "errors"
    "math/rand"
    "net/http"
    "reflect"
    "strconv"
    "strings"

    "github.com/minio/minio/cmd/config"
    "github.com/minio/minio/pkg/ellipses"
    "github.com/minio/minio/pkg/env"
    xnet "github.com/minio/minio/pkg/net"
)

@@ -167,7 +170,8 @@ const (

const (
    // EnvKMSKesEndpoint is the environment variable used to specify
    // the kes server HTTPS endpoint.
    // one or multiple KES server HTTPS endpoints. The individual
    // endpoints should be separated by ','.
    EnvKMSKesEndpoint = "MINIO_KMS_KES_ENDPOINT"

    // EnvKMSKesKeyFile is the environment variable used to specify

@@ -216,16 +220,36 @@ func LookupKesConfig(kvs config.KVS) (KesConfig, error) {
    kesCfg := KesConfig{}

    endpointStr := env.Get(EnvKMSKesEndpoint, kvs.Get(KMSKesEndpoint))
    if endpointStr != "" {
        // Lookup kes configuration & overwrite config entry if ENV var is present
        endpoint, err := xnet.ParseHTTPURL(endpointStr)
    var endpoints []string
    for _, endpoint := range strings.Split(endpointStr, ",") {
        if strings.TrimSpace(endpoint) == "" {
            continue
        }
        if !ellipses.HasEllipses(endpoint) {
            endpoints = append(endpoints, endpoint)
            continue
        }
        pattern, err := ellipses.FindEllipsesPatterns(endpoint)
        if err != nil {
            return kesCfg, err
        }
        endpointStr = endpoint.String()
        for _, p := range pattern {
            endpoints = append(endpoints, p.Expand()...)
        }
    }
    if len(endpoints) == 0 {
        return kesCfg, nil
    }

    kesCfg.Endpoint = endpointStr
    randNum := rand.Intn(len(endpoints) + 1) // We add 1 b/c len(endpoints) may be 0: See: rand.Intn docs
    kesCfg.Endpoint = make([]string, len(endpoints))
    for i, endpoint := range endpoints {
        endpoint, err := xnet.ParseHTTPURL(endpoint)
        if err != nil {
            return kesCfg, err
        }
        kesCfg.Endpoint[(randNum+i)%len(endpoints)] = endpoint.String()
    }
    kesCfg.KeyFile = env.Get(EnvKMSKesKeyFile, kvs.Get(KMSKesKeyFile))
    kesCfg.CertFile = env.Get(EnvKMSKesCertFile, kvs.Get(KMSKesCertFile))
    kesCfg.CAPath = env.Get(EnvKMSKesCAPath, kvs.Get(KMSKesCAPath))
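The endpoint list now accepts MinIO's ellipses syntax, so a single pattern can stand for a whole KES fleet. A self-contained sketch using the same `pkg/ellipses` helpers the hunk above calls (the hostname is illustrative):

```go
package main

import (
    "fmt"

    "github.com/minio/minio/pkg/ellipses"
)

func main() {
    // Expands "https://kes{1...3}.example.net:7373" into three endpoints,
    // the same way LookupKesConfig does above.
    arg := "https://kes{1...3}.example.net:7373"
    if !ellipses.HasEllipses(arg) {
        fmt.Println(arg) // plain endpoint, no expansion needed
        return
    }
    patterns, err := ellipses.FindEllipsesPatterns(arg)
    if err != nil {
        panic(err)
    }
    var endpoints []string
    for _, p := range patterns {
        endpoints = append(endpoints, p.Expand()...)
    }
    fmt.Println(endpoints)
}
```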
@@ -46,8 +46,8 @@ var ErrKESKeyExists = NewKESError(http.StatusBadRequest, "key does already exist
type KesConfig struct {
    Enabled bool

    // The kes server endpoint.
    Endpoint string
    // The KES server endpoints.
    Endpoint []string

    // The path to the TLS private key used
    // by MinIO to authenticate to the kes

@@ -86,7 +86,7 @@ type KesConfig struct {
// Verify verifies if the kes configuration is correct
func (k KesConfig) Verify() (err error) {
    switch {
    case k.Endpoint == "":
    case len(k.Endpoint) == 0:
        err = Errorf("crypto: missing kes endpoint")
    case k.CertFile == "":
        err = Errorf("crypto: missing cert file")

@@ -101,7 +101,7 @@ func (k KesConfig) Verify() (err error) {
type kesService struct {
    client *kesClient

    endpoint     string
    endpoints    []string
    defaultKeyID string
}
@@ -141,12 +141,12 @@ func NewKes(cfg KesConfig) (KMS, error) {

    return &kesService{
        client: &kesClient{
            addr:      cfg.Endpoint,
            endpoints: cfg.Endpoint,
            httpClient: http.Client{
                Transport: cfg.Transport,
            },
        },
        endpoint:     cfg.Endpoint,
        endpoints:    cfg.Endpoint,
        defaultKeyID: cfg.DefaultKeyID,
    }, nil
}

@@ -163,9 +163,9 @@ func (kes *kesService) DefaultKeyID() string {
// method.
func (kes *kesService) Info() KMSInfo {
    return KMSInfo{
        Endpoint: kes.endpoint,
        Name:     kes.DefaultKeyID(),
        AuthType: "TLS",
        Endpoints: kes.endpoints,
        Name:      kes.DefaultKeyID(),
        AuthType:  "TLS",
    }
}

@@ -221,7 +221,7 @@ func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
// • GenerateDataKey (API: /v1/key/generate/)
// • DecryptDataKey  (API: /v1/key/decrypt/)
type kesClient struct {
    addr       string
    endpoints  []string
    httpClient http.Client
}
@@ -232,8 +232,8 @@ type kesClient struct {
// application does not have the cryptographic key at
// any point in time.
func (c *kesClient) CreateKey(name string) error {
    url := fmt.Sprintf("%s/v1/key/create/%s", c.addr, url.PathEscape(name))
    _, err := c.postRetry(url, nil, 0) // No request body and no response expected
    path := fmt.Sprintf("/v1/key/create/%s", url.PathEscape(name))
    _, err := c.postRetry(path, nil, 0) // No request body and no response expected
    if err != nil {
        return err
    }

@@ -265,8 +265,8 @@ func (c *kesClient) GenerateDataKey(name string, context []byte) ([]byte, []byte
}

    const limit = 1 << 20 // A plaintext/ciphertext key pair will never be larger than 1 MB
    url := fmt.Sprintf("%s/v1/key/generate/%s", c.addr, url.PathEscape(name))
    resp, err := c.postRetry(url, bytes.NewReader(body), limit)
    path := fmt.Sprintf("/v1/key/generate/%s", url.PathEscape(name))
    resp, err := c.postRetry(path, bytes.NewReader(body), limit)
    if err != nil {
        return nil, nil, err
    }

@@ -302,8 +302,8 @@ func (c *kesClient) DecryptDataKey(name string, ciphertext, context []byte) ([]b
}

    const limit = 1 << 20 // A data key will never be larger than 1 MiB
    url := fmt.Sprintf("%s/v1/key/decrypt/%s", c.addr, url.PathEscape(name))
    resp, err := c.postRetry(url, bytes.NewReader(body), limit)
    path := fmt.Sprintf("/v1/key/decrypt/%s", url.PathEscape(name))
    resp, err := c.postRetry(path, bytes.NewReader(body), limit)
    if err != nil {
        return nil, err
    }
@@ -402,29 +402,34 @@ func (c *kesClient) post(url string, body io.Reader, limit int64) (io.Reader, er
    return &respBody, nil
}

func (c *kesClient) postRetry(url string, body io.ReadSeeker, limit int64) (io.Reader, error) {
func (c *kesClient) postRetry(path string, body io.ReadSeeker, limit int64) (io.Reader, error) {
    retryMax := 1 + len(c.endpoints)
    for i := 0; ; i++ {
        if body != nil {
            body.Seek(0, io.SeekStart) // seek to the beginning of the body.
        }
        response, err := c.post(url, body, limit)

        response, err := c.post(c.endpoints[i%len(c.endpoints)]+path, body, limit)
        if err == nil {
            return response, nil
        }

        // If the error is not temp. / retryable => fail the request immediately.
        if !xnet.IsNetworkOrHostDown(err) &&
            !errors.Is(err, io.EOF) &&
            !errors.Is(err, io.ErrUnexpectedEOF) &&
            !errors.Is(err, context.DeadlineExceeded) {
            return nil, err
        }

        // retriable network errors.
        remain := retryMax - i
        if remain <= 0 {
        if remain := retryMax - i; remain <= 0 { // Fail if we exceeded our retry limit.
            return response, err
        }

        // If there are more KES instances then skip waiting and
        // try the next endpoint directly.
        if i < len(c.endpoints) {
            continue
        }
        <-time.After(LinearJitterBackoff(retryWaitMin, retryWaitMax, i))
    }
}
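Combined with the randomized offset applied in `LookupKesConfig`, the `i%len(c.endpoints)` indexing walks the KES servers round-robin, and the backoff wait is skipped while untried endpoints remain. A toy illustration of the selection order:

```go
package main

import (
    "fmt"
    "math/rand"
)

func main() {
    endpoints := []string{"https://kes1:7373", "https://kes2:7373", "https://kes3:7373"}

    // LookupKesConfig rotates the slice by a random offset so every MinIO
    // node does not hammer the same KES server first...
    off := rand.Intn(len(endpoints))

    // ...and postRetry then walks the rotated list round-robin.
    for attempt := 0; attempt < 5; attempt++ {
        fmt.Println("attempt", attempt, "->", endpoints[(off+attempt)%len(endpoints)])
    }
}
```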
@@ -109,9 +109,9 @@ type masterKeyKMS struct {
// KMSInfo contains some describing information about
// the KMS.
type KMSInfo struct {
    Endpoint string
    Name     string
    AuthType string
    Endpoints []string
    Name      string
    AuthType  string
}

// NewMasterKey returns a basic KMS implementation from a single 256 bit master key.

@@ -147,9 +147,9 @@ func (kms *masterKeyKMS) GenerateKey(keyID string, ctx Context) (key [32]byte, s
// KMS is configured directly using master key
func (kms *masterKeyKMS) Info() (info KMSInfo) {
    return KMSInfo{
        Endpoint: "",
        Name:     "",
        AuthType: "master-key",
        Endpoints: []string{},
        Name:      "",
        AuthType:  "master-key",
    }
}
@@ -129,7 +129,7 @@ func (ssec) IsEncrypted(metadata map[string]string) bool {
// metadata is nil.
func CreateMultipartMetadata(metadata map[string]string) map[string]string {
    if metadata == nil {
        metadata = map[string]string{}
        return map[string]string{SSEMultipart: ""}
    }
    metadata[SSEMultipart] = ""
    return metadata

@@ -156,7 +156,7 @@ func (s3) CreateMetadata(metadata map[string]string, keyID string, kmsKey []byte
}

    if metadata == nil {
        metadata = map[string]string{}
        metadata = make(map[string]string, 5)
    }

    metadata[SSESealAlgorithm] = sealedKey.Algorithm

@@ -236,7 +236,7 @@ func (ssec) CreateMetadata(metadata map[string]string, sealedKey SealedKey) map[
}

    if metadata == nil {
        metadata = map[string]string{}
        metadata = make(map[string]string, 3)
    }
    metadata[SSESealAlgorithm] = SealAlgorithm
    metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
@@ -21,9 +21,8 @@ import (

// default retry configuration
const (
    retryWaitMin = 500 * time.Millisecond // minimum retry limit.
    retryWaitMax = 3 * time.Second        // 3 secs worth of max retry.
    retryMax     = 2
    retryWaitMin = 100 * time.Millisecond  // minimum retry limit.
    retryWaitMax = 1500 * time.Millisecond // 1.5 secs worth of max retry.
)

// LinearJitterBackoff provides the time.Duration for a caller to
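The `LinearJitterBackoff` body is cut off in this diff. A plausible sketch consistent with these constants — wait grows linearly with the attempt number, with random jitter between the min and max bounds — is given below. This is an assumption about the semantics, not MinIO's exact implementation:

```go
package main

import (
    "fmt"
    "math/rand"
    "time"
)

// linearJitterBackoff: attempt i waits a random duration drawn from
// [min*i, max*i]. A sketch of the idea only, not MinIO's exact code.
func linearJitterBackoff(min, max time.Duration, attempt int) time.Duration {
    if attempt < 1 {
        attempt = 1
    }
    spread := int64(max-min) * int64(attempt)
    return min*time.Duration(attempt) + time.Duration(rand.Int63n(spread+1))
}

func main() {
    for i := 0; i < 3; i++ {
        fmt.Println(linearJitterBackoff(100*time.Millisecond, 1500*time.Millisecond, i))
    }
}
```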
@@ -199,13 +199,13 @@ func (v *vaultService) DefaultKeyID() string {
}

// Info returns some information about the Vault,
// configuration - like the endpoint or authentication
// configuration - like the endpoints or authentication
// method.
func (v *vaultService) Info() KMSInfo {
    return KMSInfo{
        Endpoint: v.config.Endpoint,
        Name:     v.DefaultKeyID(),
        AuthType: v.config.Auth.Type,
        Endpoints: []string{v.config.Endpoint},
        Name:      v.DefaultKeyID(),
        AuthType:  v.config.Auth.Type,
    }
}
@@ -21,15 +21,15 @@ import (
    "context"
    "encoding/binary"
    "errors"
    "math/rand"
    "os"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/minio/minio/pkg/madmin"

    "github.com/minio/minio/cmd/config"
    "github.com/minio/minio/cmd/config/crawler"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/bucket/lifecycle"
    "github.com/minio/minio/pkg/bucket/replication"

@@ -37,6 +37,7 @@ import (
    "github.com/minio/minio/pkg/env"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/madmin"
    "github.com/willf/bloom"
)

@@ -46,7 +47,14 @@ const (
    dataCrawlStartDelay      = 5 * time.Minute // Time to wait on startup and between cycles.
    dataUsageUpdateDirCycles = 16              // Visit all folders every n cycles.

    healDeleteDangling = true
    healDeleteDangling    = true
    healFolderIncludeProb = 32  // Include a clean folder one in n cycles.
    healObjectSelectProb  = 512 // Overall probability of a file being scanned; one in n.
)

var (
    globalCrawlerConfig          crawler.Config
    dataCrawlerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
)
// initDataCrawler will start the crawler unless disabled.
@@ -60,6 +68,19 @@ func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
// The function will block until the context is canceled.
// There should only ever be one crawler running per cluster.
func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
    // Make sure only 1 crawler is running on the cluster.
    locker := objAPI.NewNSLock(ctx, minioMetaBucket, "runDataCrawler.lock")
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    for {
        err := locker.GetLock(dataCrawlerLeaderLockTimeout)
        if err != nil {
            time.Sleep(time.Duration(r.Float64() * float64(dataCrawlStartDelay)))
            continue
        }
        break
        // No unlock for "leader" lock.
    }

    // Load current bloom cycle
    nextBloomCycle := intDataUpdateTracker.current() + 1
    var buf bytes.Buffer
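The loop above is a simple leader election: every node keeps retrying the cluster lock with a randomized sleep (so nodes don't retry in lockstep), and the winner holds the lock for its lifetime. A toy sketch of the pattern (the lock itself is a stand-in here):

```go
package main

import (
    "fmt"
    "math/rand"
    "time"
)

func main() {
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    tryLock := func() bool { return r.Intn(4) == 0 } // stand-in for locker.GetLock

    for {
        if !tryLock() {
            // Sleep a random fraction of the retry delay (5 minutes in the
            // diff; milliseconds here so the example terminates quickly).
            time.Sleep(time.Duration(r.Float64() * float64(50*time.Millisecond)))
            continue
        }
        break // lock held; never released ("leader" lock)
    }
    fmt.Println("this node is the crawler leader")
}
```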
@@ -174,9 +195,9 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
// Enable healing in XL mode.
if globalIsErasure {
    // Include a clean folder one in n cycles.
    s.healFolderInclude = 32
    s.healFolderInclude = healFolderIncludeProb
    // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
    s.healObjectSelect = 512
    s.healObjectSelect = healObjectSelectProb
}
if len(cache.Info.BloomFilter) > 0 {
    s.withFilter = &bloomFilter{BloomFilter: &bloom.BloomFilter{}}
@@ -327,7 +348,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
// If there are lifecycle rules for the prefix, remove the filter.
filter := f.withFilter
var activeLifeCycle *lifecycle.Lifecycle
if f.oldCache.Info.lifeCycle != nil && filter != nil {
if f.oldCache.Info.lifeCycle != nil {
    _, prefix := path2BucketObjectWithBasePath(f.root, folder.name)
    if f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
        if f.dataUsageCrawlDebug {

@@ -441,7 +462,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
    continue
}

objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
    continue
}
@@ -602,8 +623,9 @@ func (i *crawlItem) transformMetaDir() {

// actionMeta contains information used to apply actions.
type actionMeta struct {
    oi          ObjectInfo
    numVersions int // The number of versions of this object
    oi               ObjectInfo
    successorModTime time.Time // The modtime of the successor version
    numVersions      int       // The number of versions of this object
}

// applyActions will apply lifecycle checks on to a scanned item.
@@ -623,7 +645,7 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
    if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
        return 0
    }
    if !errors.Is(err, NotImplemented{}) {
    if err != nil && !errors.Is(err, NotImplemented{}) {
        logger.LogIf(ctx, err)
        return 0
    }

@@ -636,13 +658,14 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
    versionID := meta.oi.VersionID
    action := i.lifeCycle.ComputeAction(
        lifecycle.ObjectOpts{
            Name:         i.objectPath(),
            UserTags:     meta.oi.UserTags,
            ModTime:      meta.oi.ModTime,
            VersionID:    meta.oi.VersionID,
            DeleteMarker: meta.oi.DeleteMarker,
            IsLatest:     meta.oi.IsLatest,
            NumVersions:  meta.numVersions,
            Name:             i.objectPath(),
            UserTags:         meta.oi.UserTags,
            ModTime:          meta.oi.ModTime,
            VersionID:        meta.oi.VersionID,
            DeleteMarker:     meta.oi.DeleteMarker,
            IsLatest:         meta.oi.IsLatest,
            NumVersions:      meta.numVersions,
            SuccessorModTime: meta.successorModTime,
        })
    if i.debug {
        logger.Info(color.Green("applyActions:")+" lifecycle: %q (version-id=%s), Initial scan: %v", i.objectPath(), versionID, action)

@@ -679,13 +702,14 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
    // Recalculate action.
    action = i.lifeCycle.ComputeAction(
        lifecycle.ObjectOpts{
            Name:         i.objectPath(),
            UserTags:     obj.UserTags,
            ModTime:      obj.ModTime,
            VersionID:    obj.VersionID,
            DeleteMarker: obj.DeleteMarker,
            IsLatest:     obj.IsLatest,
            NumVersions:  meta.numVersions,
            Name:             i.objectPath(),
            UserTags:         obj.UserTags,
            ModTime:          obj.ModTime,
            VersionID:        obj.VersionID,
            DeleteMarker:     obj.DeleteMarker,
            IsLatest:         obj.IsLatest,
            NumVersions:      meta.numVersions,
            SuccessorModTime: meta.successorModTime,
        })
    if i.debug {
        logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action)
@@ -756,9 +780,6 @@ func sleepDuration(d time.Duration, x float64) {
func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, meta actionMeta) {
    if meta.oi.ReplicationStatus == replication.Pending ||
        meta.oi.ReplicationStatus == replication.Failed {
        // if heal encounters a pending replication status, either replication
        // has failed due to server shutdown or crawler and PutObject replication are in contention.
        healPending := meta.oi.ReplicationStatus == replication.Pending
        replicateObject(ctx, meta.oi.Bucket, meta.oi.Name, meta.oi.VersionID, o, nil, healPending)
        globalReplicationState.queueReplicaTask(meta.oi)
    }
}
@@ -46,7 +46,7 @@ const (
    dataUpdateTrackerQueueSize = 10000

    dataUpdateTrackerFilename     = dataUsageBucket + SlashSeparator + ".tracker.bin"
    dataUpdateTrackerVersion      = 2
    dataUpdateTrackerVersion      = 3
    dataUpdateTrackerSaveInterval = 5 * time.Minute
)
@@ -236,7 +236,10 @@ func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Durati
    d.mu.Lock()
    if !d.dirty {
        d.mu.Unlock()
        return
        if exit {
            return
        }
        continue
    }
    d.Saved = UTCNow()
    err := d.serialize(&buf)

@@ -363,7 +366,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
    return err
}
switch tmp[0] {
case 1:
case 1, 2:
    logger.Info(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
    return nil
case dataUpdateTrackerVersion:

@@ -426,6 +429,8 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
}
// Ignore what remains on the stream.
// Update d:
d.mu.Lock()
defer d.mu.Unlock()
d.Current = dst.Current
d.History = dst.History
d.Saved = dst.Saved
@@ -42,6 +42,14 @@ type testingLogger struct {
    t testLoggerI
}

func (t *testingLogger) Endpoint() string {
    return ""
}

func (t *testingLogger) String() string {
    return ""
}

func (t *testingLogger) Validate() error {
    return nil
}
@@ -428,10 +428,15 @@ func (d *dataUsageCache) merge(other dataUsageCache) {
    }
}

type objectIO interface {
    GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
    PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}

// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// If the object is not found or cannot be deserialized, d is cleared and a nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name string) error {
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
    var buf bytes.Buffer
    err := store.GetObject(ctx, dataUsageBucket, name, 0, -1, &buf, "", ObjectOptions{})
    if err != nil {

@@ -450,7 +455,7 @@ func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name strin
}

// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
func (d *dataUsageCache) save(ctx context.Context, store ObjectLayer, name string) error {
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
    b := d.serialize()
    size := int64(len(b))
    r, err := hash.NewReader(bytes.NewReader(b), size, "", "", size, false)
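Narrowing the dependency from the full `ObjectLayer` to a two-method `objectIO` interface is a testability win: tests can now pass a tiny fake instead of a whole object layer. A simplified standalone sketch of the same idea (types here are stand-ins, not MinIO's):

```go
package main

import "fmt"

// A two-method interface, analogous to objectIO above.
type objectIO interface {
    Get(name string) ([]byte, error)
    Put(name string, data []byte) error
}

// fakeStore satisfies objectIO with an in-memory map — enough for tests.
type fakeStore map[string][]byte

func (f fakeStore) Get(name string) ([]byte, error) { return f[name], nil }
func (f fakeStore) Put(name string, data []byte) error {
    f[name] = data
    return nil
}

func load(store objectIO, name string) ([]byte, error) { return store.Get(name) }

func main() {
    s := fakeStore{}
    s.Put("usage-cache", []byte{1, 2, 3})
    b, _ := load(s, "usage-cache")
    fmt.Println(b)
}
```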
@@ -240,7 +240,6 @@ func TestDataUsageUpdate(t *testing.T) {
    t.Fatal("got nil result")
}
if w.flatten {
    t.Log(e.Children)
    *e = got.flatten(*e)
}
if e.Size != int64(w.size) {
@@ -157,7 +157,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
}
cache := diskCache{
    dir:       dir,
    triggerGC: make(chan struct{}),
    triggerGC: make(chan struct{}, 1),
    stats:     CacheDiskStats{Dir: dir},
    quotaPct:  quotaPct,
    after:     config.After,

@@ -174,7 +174,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
    nsMutex: newNSLock(false),
}
go cache.purgeWait(ctx)
cache.diskUsageHigh()       // update if cache usage is already high.
cache.diskSpaceAvailable(0) // update if cache usage is already high.
cache.NewNSLockFn = func(ctx context.Context, cachePath string) RWLocker {
    return cache.nsMutex.NewNSLock(ctx, nil, cachePath, "")
}

@@ -194,7 +194,7 @@ func (c *diskCache) diskUsageLow() bool {
    logger.LogIf(ctx, err)
    return false
}
usedPercent := (di.Total - di.Free) * 100 / di.Total
usedPercent := (di.Used / di.Total) * 100
low := int(usedPercent) < gcStopPct
atomic.StoreUint64(&c.stats.UsagePercent, usedPercent)
if low {

@@ -203,9 +203,9 @@ func (c *diskCache) diskUsageLow() bool {
    return low
}

// Returns if the disk usage reaches high water mark w.r.t the configured cache quota.
// gc starts if high water mark reached.
func (c *diskCache) diskUsageHigh() bool {
// Returns if the disk usage reaches or exceeds the configured cache quota when size is added.
// If the current usage without size exceeds the high watermark, a GC is automatically queued.
func (c *diskCache) diskSpaceAvailable(size int64) bool {
    gcTriggerPct := c.quotaPct * c.highWatermark / 100
    di, err := disk.GetInfo(c.dir)
    if err != nil {

@@ -214,27 +214,30 @@ func (c *diskCache) diskUsageHigh() bool {
    logger.LogIf(ctx, err)
    return false
}
usedPercent := (di.Total - di.Free) * 100 / di.Total
high := int(usedPercent) >= gcTriggerPct
atomic.StoreUint64(&c.stats.UsagePercent, usedPercent)
if high {
    atomic.StoreInt32(&c.stats.UsageState, 1)
}
return high
}

// Returns if size space can be allocated without exceeding
// max disk usable for caching
func (c *diskCache) diskAvailable(size int64) bool {
    di, err := disk.GetInfo(c.dir)
    if err != nil {
        reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
        ctx := logger.SetReqInfo(GlobalContext, reqInfo)
        logger.LogIf(ctx, err)
if di.Total == 0 {
    logger.Info("diskCache: Received 0 total disk size")
    return false
}
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
return int(usedPercent) < c.quotaPct
usedPercent := float64(di.Used) * 100 / float64(di.Total)
if usedPercent >= float64(gcTriggerPct) {
    atomic.StoreInt32(&c.stats.UsageState, 1)
    c.queueGC()
}
atomic.StoreUint64(&c.stats.UsagePercent, uint64(usedPercent))

// Recalculate percentage with provided size added.
usedPercent = float64(di.Used+uint64(size)) * 100 / float64(di.Total)

return usedPercent < float64(c.quotaPct)
}

// queueGC will queue a GC.
// Calling this function is always non-blocking.
func (c *diskCache) queueGC() {
    select {
    case c.triggerGC <- struct{}{}:
    default:
    }
}
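`queueGC` pairs with the new buffered `triggerGC` channel: a non-blocking send into a size-1 buffer coalesces any number of triggers into at most one pending GC, so callers never stall. A standalone demonstration of the idiom:

```go
package main

import "fmt"

func main() {
    // A buffered channel of size 1, as in newDiskCache above.
    triggerGC := make(chan struct{}, 1)

    queue := func() {
        select {
        case triggerGC <- struct{}{}:
        default: // a GC is already queued; drop the duplicate trigger
        }
    }

    queue()
    queue() // coalesced with the first trigger
    fmt.Println(len(triggerGC)) // 1 — at most one GC pending
}
```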
// toClear returns how many bytes should be cleared to reach the low watermark quota.
|
||||
@@ -247,7 +250,7 @@ func (c *diskCache) toClear() uint64 {
|
||||
logger.LogIf(ctx, err)
|
||||
return 0
|
||||
}
|
||||
return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark))
|
||||
return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark))
|
||||
}
|
||||
|
||||
var (
@@ -658,8 +661,7 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)

// Caches the object to disk
func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly bool) error {
-	if c.diskUsageHigh() {
-		c.triggerGC <- struct{}{}
+	if !c.diskSpaceAvailable(size) {
		io.Copy(ioutil.Discard, data)
		return errDiskFull
	}
@@ -688,16 +690,13 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
	if rs != nil {
		return c.putRange(ctx, bucket, object, data, size, rs, opts)
	}
-	if !c.diskAvailable(size) {
+	if !c.diskSpaceAvailable(size) {
		return errDiskFull
	}
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return err
	}
-	var metadata = make(map[string]string)
-	for k, v := range opts.UserDefined {
-		metadata[k] = v
-	}
+	var metadata = cloneMSS(opts.UserDefined)
	var reader = data
	var actualSize = uint64(size)
	if globalCacheKMS != nil {
@@ -719,7 +718,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read

	if actualSize != uint64(n) {
		removeAll(cachePath)
-		return IncompleteBody{}
+		return IncompleteBody{Bucket: bucket, Object: object}
	}
	return c.saveMetadata(ctx, bucket, object, metadata, n, nil, "", incHitsOnly)
}
@@ -730,17 +729,14 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
	if err != nil {
		return err
	}
-	if !c.diskAvailable(rlen) {
+	if !c.diskSpaceAvailable(rlen) {
		return errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return err
	}
-	var metadata = make(map[string]string)
-	for k, v := range opts.UserDefined {
-		metadata[k] = v
-	}
+	var metadata = cloneMSS(opts.UserDefined)
	var reader = data
	var actualSize = uint64(rlen)
	// objSize is the actual size of object (with encryption overhead if any)
@@ -766,7 +762,7 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
	}
	if actualSize != uint64(n) {
		removeAll(cachePath)
-		return IncompleteBody{}
+		return IncompleteBody{Bucket: bucket, Object: object}
	}
	return c.saveMetadata(ctx, bucket, object, metadata, int64(objSize), rs, cacheFile, false)
}

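Both Put and putRange replace a hand-rolled copy loop with cloneMSS. The helper's body is not shown in this diff; presumably it is a shallow map[string]string copy along these lines (a sketch, not the actual MinIO implementation):

package main

import "fmt"

// cloneMSS returns a shallow copy of a map[string]string -- a sketch of
// what the helper presumably does; the real implementation lives
// elsewhere in the MinIO tree.
func cloneMSS(m map[string]string) map[string]string {
	dst := make(map[string]string, len(m))
	for k, v := range m {
		dst[k] = v
	}
	return dst
}

func main() {
	src := map[string]string{"etag": "abc123"}
	dst := cloneMSS(src)
	dst["etag"] = "changed"
	fmt.Println(src["etag"], dst["etag"]) // abc123 changed: the source is untouched
}
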
@@ -489,9 +489,15 @@ func (f *fileScorer) queueString() string {
// bytesToClear() returns the number of bytes to clear to reach low watermark
// w.r.t quota given disk total and free space, quota in % allocated to cache
// and low watermark % w.r.t allowed quota.
-func bytesToClear(total, free int64, quotaPct, lowWatermark uint64) uint64 {
-	used := (total - free)
+// If the high watermark hasn't been reached 0 will be returned.
+func bytesToClear(total, free int64, quotaPct, lowWatermark, highWatermark uint64) uint64 {
+	used := total - free
	quotaAllowed := total * (int64)(quotaPct) / 100
-	lowWMUsage := (total * (int64)(lowWatermark*quotaPct) / (100 * 100))
+	highWMUsage := total * (int64)(highWatermark*quotaPct) / (100 * 100)
+	if used < highWMUsage {
+		return 0
+	}
+	// Return bytes needed to reach low watermark.
+	lowWMUsage := total * (int64)(lowWatermark*quotaPct) / (100 * 100)
	return (uint64)(math.Min(float64(quotaAllowed), math.Max(0.0, float64(used-lowWMUsage))))
}

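Tracing the new arithmetic on one of the test vectors in the hunk below: total=1000 and quotaPct=40 give quotaAllowed=400; watermarks at 90% of quota put both thresholds at 360. With free=200, used=800 is past the high watermark, so the function clears min(400, 800-360) = 400 bytes. A standalone copy for experimenting (mirrors the diff, trimmed of MinIO context):

package main

import (
	"fmt"
	"math"
)

// Standalone copy of the new bytesToClear logic, for tracing the
// watermark arithmetic by hand.
func bytesToClear(total, free int64, quotaPct, lowWatermark, highWatermark uint64) uint64 {
	used := total - free
	quotaAllowed := total * int64(quotaPct) / 100
	highWMUsage := total * int64(highWatermark*quotaPct) / (100 * 100)
	if used < highWMUsage {
		return 0 // high watermark not reached yet: nothing to clear
	}
	// Return bytes needed to reach low watermark, capped at the quota.
	lowWMUsage := total * int64(lowWatermark*quotaPct) / (100 * 100)
	return uint64(math.Min(float64(quotaAllowed), math.Max(0, float64(used-lowWMUsage))))
}

func main() {
	// total=1000, free=200, quota=40% => quotaAllowed=400; both
	// watermarks at 90% of quota => 360. used=800 >= 360, so clear
	// min(400, 800-360) = 400 bytes.
	fmt.Println(bytesToClear(1000, 200, 40, 90, 90)) // 400
}
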
@@ -149,22 +149,26 @@ func TestNewFileScorer(t *testing.T) {
}
func TestBytesToClear(t *testing.T) {
	testCases := []struct {
-		total        int64
-		free         int64
-		quotaPct     uint64
-		watermarkLow uint64
-		expected     uint64
+		total         int64
+		free          int64
+		quotaPct      uint64
+		watermarkLow  uint64
+		watermarkHigh uint64
+		expected      uint64
	}{
-		{1000, 800, 40, 90, 0},
-		{1000, 200, 40, 90, 400},
-		{1000, 400, 40, 90, 240},
-		{1000, 600, 40, 90, 40},
-		{1000, 600, 40, 70, 120},
-		{1000, 1000, 90, 70, 0},
-		{1000, 0, 90, 70, 370},
+		{total: 1000, free: 800, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 0},
+		{total: 1000, free: 200, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 400},
+		{total: 1000, free: 400, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 240},
+		{total: 1000, free: 600, quotaPct: 40, watermarkLow: 90, watermarkHigh: 90, expected: 40},
+		{total: 1000, free: 600, quotaPct: 40, watermarkLow: 70, watermarkHigh: 70, expected: 120},
+		{total: 1000, free: 1000, quotaPct: 90, watermarkLow: 70, watermarkHigh: 70, expected: 0},
+
+		// High not yet reached..
+		{total: 1000, free: 250, quotaPct: 100, watermarkLow: 50, watermarkHigh: 90, expected: 0},
+		{total: 1000, free: 250, quotaPct: 100, watermarkLow: 50, watermarkHigh: 90, expected: 0},
	}
	for i, tc := range testCases {
-		toClear := bytesToClear(tc.total, tc.free, tc.quotaPct, tc.watermarkLow)
+		toClear := bytesToClear(tc.total, tc.free, tc.quotaPct, tc.watermarkLow, tc.watermarkHigh)
		if tc.expected != toClear {
			t.Errorf("test %d expected %v, got %v", i, tc.expected, toClear)
		}

@@ -85,16 +85,15 @@ type cacheObjects struct {
}

func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {
-	metadata := make(map[string]string)
-	metadata["etag"] = eTag
+	metadata := map[string]string{"etag": eTag}
	return dcache.SaveMetadata(ctx, bucket, object, metadata, size, rs, "", true)
}

// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {

-	bkMeta := make(map[string]string)
-	cacheMeta := make(map[string]string)
+	bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
+	cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
	for k, v := range bkObjectInfo.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			// Do not need to send any internal metadata
@@ -166,7 +165,7 @@ func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects

// construct a metadata k-v map
func getMetadata(objInfo ObjectInfo) map[string]string {
-	metadata := make(map[string]string)
+	metadata := make(map[string]string, len(objInfo.UserDefined)+4)
	metadata["etag"] = objInfo.ETag
	metadata["content-type"] = objInfo.ContentType
	if objInfo.ContentEncoding != "" {
@@ -284,12 +283,6 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
	// Reaching here implies cache miss
	c.cacheStats.incMiss()

-	// Since we got here, we are serving the request from backend,
-	// and also adding the object to the cache.
-	if dcache.diskUsageHigh() {
-		dcache.triggerGC <- struct{}{} // this is non-blocking
-	}
-
	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)

	if bkErr != nil {
@@ -306,7 +299,9 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
	if cacheErr == nil {
		bkReader.ObjInfo.CacheLookupStatus = CacheHit
	}
-	if !dcache.diskAvailable(objInfo.Size) {
+
+	// Check if we can add it without exceeding total cache size.
+	if !dcache.diskSpaceAvailable(objInfo.Size) {
		return bkReader, bkErr
	}

@@ -338,11 +333,12 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(bkReader, pipeWriter)
+	userDefined := getMetadata(bkReader.ObjInfo)
	go func() {
		putErr := dcache.Put(ctx, bucket, object,
			io.LimitReader(pipeReader, bkReader.ObjInfo.Size),
			bkReader.ObjInfo.Size, nil, ObjectOptions{
-				UserDefined: getMetadata(bkReader.ObjInfo),
+				UserDefined: userDefined,
			}, false)
		// close the write end of the pipe, so the error gets
		// propagated to getObjReader
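This hunk streams the object to the client and to the cache at once: io.TeeReader mirrors every byte the client reads into an io.Pipe that dcache.Put drains from a goroutine, so caching never requires a second read from the backend. A minimal standalone sketch of the pattern (simplified names, no MinIO types):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("object bytes")

	pr, pw := io.Pipe()
	tee := io.TeeReader(src, pw)

	// "Cache" goroutine drains the pipe, as dcache.Put does in the hunk.
	done := make(chan string)
	go func() {
		b, _ := io.ReadAll(pr)
		done <- string(b)
	}()

	// The "client" read drives the tee; each chunk read is also
	// written to the pipe before the read returns.
	out, _ := io.ReadAll(tee)
	pw.Close() // propagate EOF to the cache side

	fmt.Println(out, "/", <-done) // object bytes / object bytes
}
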
@@ -612,9 +608,10 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
	}

	// fetch from backend if there is no space on cache drive
-	if !dcache.diskAvailable(size) {
+	if !dcache.diskSpaceAvailable(size) {
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	if opts.ServerSideEncryption != nil {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
@@ -721,7 +718,9 @@ func (c *cacheObjects) gc(ctx context.Context) {
	}
	for _, dcache := range c.cache {
		if dcache != nil {
-			dcache.triggerGC <- struct{}{}
+			// Check if there is disk.
+			// Will queue a GC scan if at high watermark.
+			dcache.diskSpaceAvailable(0)
		}
	}
}

@@ -17,6 +17,7 @@
package cmd

import (
+	"math"
	"sync"
	"sync/atomic"
	"time"
@@ -26,7 +27,8 @@ const (
	dynamicTimeoutIncreaseThresholdPct = 0.33 // Upper threshold for failures in order to increase timeout
	dynamicTimeoutDecreaseThresholdPct = 0.10 // Lower threshold for failures in order to decrease timeout
	dynamicTimeoutLogSize              = 16
-	maxDuration                        = 1<<63 - 1
+	maxDuration                        = math.MaxInt64
+	maxDynamicTimeout                  = 24 * time.Hour // Never set timeout bigger than this.
)

// timeouts that are dynamically adapted based on actual usage results
@@ -40,6 +42,12 @@ type dynamicTimeout struct {

// newDynamicTimeout returns a new dynamic timeout initialized with timeout value
func newDynamicTimeout(timeout, minimum time.Duration) *dynamicTimeout {
+	if timeout <= 0 || minimum <= 0 {
+		panic("newDynamicTimeout: negative or zero timeout")
+	}
+	if minimum > timeout {
+		minimum = timeout
+	}
	return &dynamicTimeout{timeout: int64(timeout), minimum: int64(minimum)}
}

@@ -61,60 +69,73 @@ func (dt *dynamicTimeout) LogFailure() {

// logEntry stores a log entry
func (dt *dynamicTimeout) logEntry(duration time.Duration) {
+	if duration < 0 {
+		return
+	}
	entries := int(atomic.AddInt64(&dt.entries, 1))
	index := entries - 1
	if index < dynamicTimeoutLogSize {
		dt.mutex.Lock()
		dt.log[index] = duration
+
+		// We leak entries while we copy
+		if entries == dynamicTimeoutLogSize {
+
+			// Make copy on stack in order to call adjust()
+			logCopy := [dynamicTimeoutLogSize]time.Duration{}
+			copy(logCopy[:], dt.log[:])
+
+			// reset log entries
+			atomic.StoreInt64(&dt.entries, 0)
+			dt.mutex.Unlock()
+
+			dt.adjust(logCopy)
+			return
+		}
		dt.mutex.Unlock()
	}
-	if entries == dynamicTimeoutLogSize {
-		dt.mutex.Lock()
-
-		// Make copy on stack in order to call adjust()
-		logCopy := [dynamicTimeoutLogSize]time.Duration{}
-		copy(logCopy[:], dt.log[:])
-
-		// reset log entries
-		atomic.StoreInt64(&dt.entries, 0)
-
-		dt.mutex.Unlock()
-
-		dt.adjust(logCopy)
-	}
}

// adjust changes the value of the dynamic timeout based on the
// previous results
func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {

-	failures, average := 0, int64(0)
-	for i := 0; i < len(entries); i++ {
-		if entries[i] == maxDuration {
+	failures, max := 0, time.Duration(0)
+	for _, dur := range entries[:] {
+		if dur == maxDuration {
			failures++
-		} else {
-			average += int64(entries[i])
+		} else if dur > max {
+			max = dur
		}
	}
-	if failures < len(entries) {
-		average /= int64(len(entries) - failures)
-	}

-	timeOutHitPct := float64(failures) / float64(len(entries))
+	failPct := float64(failures) / float64(len(entries))

-	if timeOutHitPct > dynamicTimeoutIncreaseThresholdPct {
+	if failPct > dynamicTimeoutIncreaseThresholdPct {
		// We are hitting the timeout too often, so increase the timeout by 25%
		timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
-		atomic.StoreInt64(&dt.timeout, timeout)
-	} else if timeOutHitPct < dynamicTimeoutDecreaseThresholdPct {
-		// We are hitting the timeout relatively few times, so decrease the timeout
-		average = average * 125 / 100 // Add buffer of 25% on top of average
-
-		timeout := (atomic.LoadInt64(&dt.timeout) + average) / 2 // Middle between current timeout and average success
+		// Set upper cap.
+		if timeout > int64(maxDynamicTimeout) {
+			timeout = int64(maxDynamicTimeout)
+		}
+		// Safety, shouldn't happen
		if timeout < dt.minimum {
			timeout = dt.minimum
		}
		atomic.StoreInt64(&dt.timeout, timeout)
+	} else if failPct < dynamicTimeoutDecreaseThresholdPct {
+		// We are hitting the timeout relatively few times,
+		// so decrease the timeout towards 25 % of maximum time spent.
+		max = max * 125 / 100
+
+		timeout := atomic.LoadInt64(&dt.timeout)
+		if max < time.Duration(timeout) {
+			// Move 50% toward the max.
+			timeout = (int64(max) + timeout) / 2
+		}
+		if timeout < dt.minimum {
+			timeout = dt.minimum
+		}
+		atomic.StoreInt64(&dt.timeout, timeout)
	}

}

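Concretely, with the 16-entry log: more than 0.33 x 16 (about 5) failures bumps the timeout by 25%, while fewer than 0.10 x 16 (at most one) failures walks it halfway toward 1.25x the slowest successful call. A toy trace of both paths with plain numbers, not the MinIO types:

package main

import (
	"fmt"
	"time"
)

func main() {
	const logSize = 16.0
	timeout := int64(time.Second)

	// Increase path: 6 of 16 entries timed out (0.375 > 0.33),
	// so the timeout grows by 25%.
	if 6/logSize > 0.33 {
		timeout = timeout * 125 / 100
	}
	fmt.Println(time.Duration(timeout)) // 1.25s

	// Decrease path: one failure (0.0625 < 0.10) and the slowest
	// success took 200ms; pad it by 25% and move halfway toward it.
	max := int64(200*time.Millisecond) * 125 / 100
	if max < timeout {
		timeout = (max + timeout) / 2
	}
	fmt.Println(time.Duration(timeout)) // 750ms
}
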
@@ -18,6 +18,8 @@ package cmd

import (
	"math/rand"
+	"runtime"
+	"sync"
	"testing"
	"time"
)
@@ -98,7 +100,7 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
	adjustedAgain := timeout.Timeout()

	if initial <= adjusted || adjusted <= adjustedAgain {
-		t.Errorf("Failure to decrease timeout multiple times")
+		t.Errorf("Failure to decrease timeout multiple times, initial: %v, adjusted: %v, again: %v", initial, adjusted, adjustedAgain)
	}
}

@@ -123,6 +125,30 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
	}
}

+func TestDynamicTimeoutConcurrent(t *testing.T) {
+	// Race test.
+	timeout := newDynamicTimeout(time.Second, time.Millisecond)
+	var wg sync.WaitGroup
+	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+		wg.Add(1)
+		rng := rand.New(rand.NewSource(int64(i)))
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 100; i++ {
+				timeout.LogFailure()
+				for j := 0; j < 100; j++ {
+					timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
+				}
+				to := timeout.Timeout()
+				if to < time.Millisecond || to > time.Second {
+					panic(to)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
+
func TestDynamicTimeoutHitMinimum(t *testing.T) {

	const minimum = 30 * time.Second
@@ -168,7 +194,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {

	timeout := newDynamicTimeout(time.Minute, time.Second)

-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(0)

	initial := timeout.Timeout()

@@ -188,7 +214,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {

	timeout := newDynamicTimeout(time.Minute, time.Second)

-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(0)

	initial := timeout.Timeout()

@@ -388,12 +388,7 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
		object:            object,
		customerKeyHeader: h.Get(crypto.SSECKey),
		copySource:        copySource,
-	}
-
-	w.metadata = map[string]string{}
-	// Copy encryption metadata for internal use.
-	for k, v := range oi.UserDefined {
-		w.metadata[k] = v
+		metadata:          cloneMSS(oi.UserDefined),
	}

	if w.copySource {
@@ -432,10 +427,7 @@ type DecryptBlocksReader struct {
}

func (d *DecryptBlocksReader) buildDecrypter(partID int) error {
-	m := make(map[string]string)
-	for k, v := range d.metadata {
-		m[k] = v
-	}
+	m := cloneMSS(d.metadata)
	// Initialize the first decrypter; new decrypters will be
	// initialized in Read() operation as needed.
	var key []byte

@@ -24,6 +24,7 @@ import (
	"net/url"
	"path"
	"path/filepath"
+	"reflect"
	"runtime"
	"strconv"
	"strings"
@@ -193,7 +194,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
}

// ZoneEndpoints represent endpoints in a given zone
-// along with its setCount and drivesPerSet.
+// along with its setCount and setDriveCount.
type ZoneEndpoints struct {
	SetCount     int
	DrivesPerSet int
@@ -203,6 +204,21 @@ type ZoneEndpoints struct {
// EndpointZones - list of list of endpoints
type EndpointZones []ZoneEndpoints

+// GetLocalZoneIdx returns the zone which the endpoint belongs to locally.
+// If ep is remote, this returns -1 as the zone index.
+func (l EndpointZones) GetLocalZoneIdx(ep Endpoint) int {
+	for i, zep := range l {
+		for _, cep := range zep.Endpoints {
+			if cep.IsLocal && ep.IsLocal {
+				if reflect.DeepEqual(cep, ep) {
+					return i
+				}
+			}
+		}
+	}
+	return -1
+}

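GetLocalZoneIdx relies on reflect.DeepEqual for structural comparison of endpoints rather than a hand-written equality method. A minimal illustration with a stand-in struct (not MinIO's Endpoint type):

package main

import (
	"fmt"
	"reflect"
)

type endpoint struct {
	Host    string
	IsLocal bool
}

func main() {
	// DeepEqual compares field by field, so two distinct values with
	// identical contents compare equal.
	a := endpoint{Host: "server1:9000", IsLocal: true}
	b := endpoint{Host: "server1:9000", IsLocal: true}
	fmt.Println(reflect.DeepEqual(a, b)) // true
	b.Host = "server2:9000"
	fmt.Println(reflect.DeepEqual(a, b)) // false
}
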
// Add adds zone endpoints
func (l *EndpointZones) Add(zeps ZoneEndpoints) error {
	existSet := set.NewStringSet()
@@ -753,9 +769,17 @@ func GetProxyEndpoints(endpointZones EndpointZones) ([]ProxyEndpoint, error) {
				RootCAs: globalRootCAs,
			}
		}

+		// allow transport to be HTTP/1.1 for proxying.
+		tr := newCustomHTTP11Transport(tlsConfig, rest.DefaultTimeout)()
+
+		// Allow more requests to be in flight with higher response header timeout.
+		tr.ResponseHeaderTimeout = 30 * time.Minute
+		tr.MaxIdleConnsPerHost = 64
+
		proxyEps = append(proxyEps, ProxyEndpoint{
			Endpoint:  endpoint,
-			Transport: newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)(),
+			Transport: tr,
		})
	}
}

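The proxy hunk builds one tuned transport per endpoint instead of a fresh default each time; newCustomHTTP11Transport is a MinIO helper whose body is not shown here. A sketch of equivalent tuning using only net/http, under the assumption that the helper returns a plain HTTP/1.1 http.Transport:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Tune a transport the way the hunk does: stay on HTTP/1.1 for
	// proxying, allow long-running responses, and keep more idle
	// connections per host so requests can be pipelined efficiently.
	tr := &http.Transport{
		ForceAttemptHTTP2:     false,
		ResponseHeaderTimeout: 30 * time.Minute,
		MaxIdleConnsPerHost:   64,
	}
	client := &http.Client{Transport: tr}
	fmt.Println(client.Timeout) // 0s: deadlines come from the transport settings
}
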
@@ -18,7 +18,6 @@ package cmd

import (
	"context"
-	"sort"

	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio/cmd/logger"
@@ -49,7 +48,7 @@ func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket stri
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
-				if err := storageDisks[index].MakeVol(bucket); err != nil {
+				if err := storageDisks[index].MakeVol(ctx, bucket); err != nil {
					if err != errVolumeExists {
						logger.LogIf(ctx, err)
					}
@@ -75,7 +74,7 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {
		}
		index := index
		g.Go(func() error {
-			_ = storageDisks[index].MakeVol(bucket)
+			_ = storageDisks[index].MakeVol(context.Background(), bucket)
			return nil
		}, index)
	}
@@ -86,31 +85,40 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {

// getBucketInfo - returns the BucketInfo from one of the load balanced disks.
func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) {
-	var bucketErrs []error
-	for _, disk := range er.getLoadBalancedDisks() {
-		if disk == nil {
-			bucketErrs = append(bucketErrs, errDiskNotFound)
-			continue
-		}
-		volInfo, serr := disk.StatVol(bucketName)
-		if serr == nil {
-			return BucketInfo(volInfo), nil
-		}
-		err = serr
-		// For any reason disk went offline continue and pick the next one.
-		if IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
-			bucketErrs = append(bucketErrs, err)
-			continue
-		}
-		// Any error which cannot be ignored, we return quickly.
-		return BucketInfo{}, err
+	storageDisks := er.getDisks()
+
+	g := errgroup.WithNErrs(len(storageDisks))
+	var bucketsInfo = make([]BucketInfo, len(storageDisks))
+	// Undo previous make bucket entry on all underlying storage disks.
+	for index := range storageDisks {
+		index := index
+		g.Go(func() error {
+			if storageDisks[index] == nil {
+				return errDiskNotFound
+			}
+			volInfo, err := storageDisks[index].StatVol(ctx, bucketName)
+			if err != nil {
+				return err
+			}
+			bucketsInfo[index] = BucketInfo(volInfo)
+			return nil
+		}, index)
	}

+	errs := g.Wait()
+
+	for i, err := range errs {
+		if err == nil {
+			return bucketsInfo[i], nil
+		}
+	}
+
	// If all our errors were ignored, then we try to
	// reduce to one error based on read quorum.
	// `nil` is deliberately passed for ignoredErrs
	// because these errors were already ignored.
-	readQuorum := getReadQuorum(len(er.getDisks()))
-	return BucketInfo{}, reduceReadQuorumErrs(ctx, bucketErrs, nil, readQuorum)
+	readQuorum := getReadQuorum(len(storageDisks))
+	return BucketInfo{}, reduceReadQuorumErrs(ctx, errs, nil, readQuorum)
}

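The rewritten getBucketInfo replaces a sequential disk walk with a parallel fan-out: StatVol runs on every disk at once (errgroup.WithNErrs is MinIO's helper) and the first successful result wins. A standalone sketch of that fan-out/first-success shape using only the standard library (hypothetical names):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// firstSuccess runs every op in parallel and returns the index of the
// lowest-numbered op that succeeded, mirroring the rewritten
// getBucketInfo: collect all errors first, then scan for a nil.
func firstSuccess(ops []func() error) (int, []error) {
	errs := make([]error, len(ops))
	var wg sync.WaitGroup
	for i, op := range ops {
		wg.Add(1)
		go func(i int, op func() error) {
			defer wg.Done()
			errs[i] = op()
		}(i, op)
	}
	wg.Wait()
	for i, err := range errs {
		if err == nil {
			return i, errs
		}
	}
	return -1, errs // caller can reduce errs against a quorum
}

func main() {
	ops := []func() error{
		func() error { return errors.New("disk offline") },
		func() error { return nil },
	}
	idx, _ := firstSuccess(ops)
	fmt.Println(idx) // 1
}
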
// GetBucketInfo - returns BucketInfo for a bucket.
@@ -122,54 +130,6 @@ func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi B
	return bucketInfo, nil
}

-// listBuckets - returns list of all buckets from a disk picked at random.
-func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) {
-	for _, disk := range er.getLoadBalancedDisks() {
-		if disk == nil {
-			continue
-		}
-		var volsInfo []VolInfo
-		volsInfo, err = disk.ListVols()
-		if err == nil {
-			// NOTE: The assumption here is that volumes across all disks in
-			// readQuorum have consistent view i.e they all have same number
-			// of buckets. This is essentially not verified since healing
-			// should take care of this.
-			var bucketsInfo []BucketInfo
-			for _, volInfo := range volsInfo {
-				if isReservedOrInvalidBucket(volInfo.Name, true) {
-					continue
-				}
-				bucketsInfo = append(bucketsInfo, BucketInfo(volInfo))
-			}
-			// For buckets info empty, loop once again to check
-			// if we have, can happen if disks were down.
-			if len(bucketsInfo) == 0 {
-				continue
-			}
-			return bucketsInfo, nil
-		}
-		logger.LogIf(ctx, err)
-		// Ignore any disks not found.
-		if IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
-			continue
-		}
-		break
-	}
-	return nil, err
-}
-
-// ListBuckets - lists all the buckets, sorted by its name.
-func (er erasureObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
-	bucketInfos, err := er.listBuckets(ctx)
-	if err != nil {
-		return nil, toObjectErr(err)
-	}
-	// Sort by bucket name before returning.
-	sort.Sort(byBucketName(bucketInfos))
-	return bucketInfos, nil
-}
-
// Dangling buckets should be handled appropriately, in this following situation
// we actually have quorum error to be `nil` but we have some disks where
// the bucket delete returned `errVolumeNotEmpty` but this is not correct
@@ -182,10 +142,10 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs
	for index, err := range dErrs {
		if err == errVolumeNotEmpty {
			// Attempt to delete bucket again.
-			if derr := storageDisks[index].DeleteVol(bucket, false); derr == errVolumeNotEmpty {
+			if derr := storageDisks[index].DeleteVol(ctx, bucket, false); derr == errVolumeNotEmpty {
				_ = cleanupDir(ctx, storageDisks[index], bucket, "")

-				_ = storageDisks[index].DeleteVol(bucket, false)
+				_ = storageDisks[index].DeleteVol(ctx, bucket, false)

				// Cleanup all the previously incomplete multiparts.
				_ = cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
@@ -205,7 +165,7 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
-				if err := storageDisks[index].DeleteVol(bucket, forceDelete); err != nil {
+				if err := storageDisks[index].DeleteVol(ctx, bucket, forceDelete); err != nil {
					return err
				}
				err := cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
@@ -244,7 +204,10 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
	// If we reduce quorum to nil, means we have deleted buckets properly
	// on some servers in quorum, we should look for volumeNotEmpty errors
	// and delete those buckets as well.
-	deleteDanglingBucket(ctx, storageDisks, dErrs, bucket)
+	//
+	// let this call succeed, even if client cancels the context
+	// this is to ensure that we don't leave any stale content
+	deleteDanglingBucket(context.Background(), storageDisks, dErrs, bucket)

	return nil
}

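The last hunk deliberately detaches the dangling-bucket cleanup from the request context, so a client disconnect cannot abort it mid-way. A minimal illustration of why context.Background() behaves differently from a cancelled request context:

package main

import (
	"context"
	"fmt"
)

// cleanup stands in for any context-aware operation: it fails as soon
// as the supplied context has been cancelled.
func cleanup(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the client went away

	fmt.Println(cleanup(ctx))                  // context canceled
	fmt.Println(cleanup(context.Background())) // <nil>: detached cleanup still runs
}
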
@@ -19,20 +19,94 @@ package cmd

import (
+	"context"
	"path"
+	"sync"

	"github.com/minio/minio/pkg/sync/errgroup"
)

// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
-func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
+func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	// Based on the random shuffling return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
-		newDisks = append(newDisks, disks[i-1])
+		if disks[i-1] != nil && disks[i-1].IsLocal() {
+			if !disks[i-1].Healing() && disks[i-1].IsOnline() {
+				newDisks = append(newDisks, disks[i-1])
+			}
+		}
	}
	return newDisks
}

+// getLoadBalancedNDisks - fetches load balanced (sufficiently randomized) disk slice
+// with N disks online. If ndisks is zero or negative, then it will returns all disks,
+// same if ndisks is greater than the number of all disks.
+func (er erasureObjects) getLoadBalancedNDisks(ndisks int) (newDisks []StorageAPI) {
+	disks := er.getLoadBalancedDisks(ndisks != -1)
+	for _, disk := range disks {
+		newDisks = append(newDisks, disk)
+		ndisks--
+		if ndisks == 0 {
+			break
+		}
+	}
+	return
+}
+
+// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
+// ensures to skip disks if they are not healing and online.
+func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
+	disks := er.getDisks()
+
+	if !optimized {
+		var newDisks []StorageAPI
+		for _, i := range hashOrder(UTCNow().String(), len(disks)) {
+			newDisks = append(newDisks, disks[i-1])
+		}
+		return newDisks
+	}
+
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	var newDisks = map[uint64][]StorageAPI{}
+	// Based on the random shuffling return back randomized disks.
+	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
+		i := i
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if disks[i-1] == nil {
+				return
+			}
+			di, err := disks[i-1].DiskInfo(context.Background())
+			if err != nil || di.Healing {
+				// - Do not consume disks which are not reachable
+				//   unformatted or simply not accessible for some reason.
+				//
+				// - Do not consume disks which are being healed
+				//
+				// - Future: skip busy disks
+				return
+			}
+
+			mu.Lock()
+			// Capture disks usage wise
+			newDisks[di.Used] = append(newDisks[di.Used], disks[i-1])
+			mu.Unlock()
+		}()
+	}
+	wg.Wait()
+
+	var max uint64
+	for k := range newDisks {
+		if k > max {
+			max = k
+		}
+	}
+
+	// Return disks which have maximum disk usage common.
+	return newDisks[max]
+}

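The optimized path above groups disks by identical Used byte counts and serves from the group with the highest usage, presumably so reads land on the disks holding the most data. The grouping step in isolation (toy data, not MinIO types):

package main

import "fmt"

func main() {
	// Bucket items by identical usage, then return the group with the
	// highest usage key, mirroring getLoadBalancedDisks.
	usage := map[string]uint64{"d1": 100, "d2": 250, "d3": 250}
	groups := map[uint64][]string{}
	for disk, used := range usage {
		groups[used] = append(groups[used], disk)
	}
	var max uint64
	for k := range groups {
		if k > max {
			max = k
		}
	}
	fmt.Println(groups[max]) // [d2 d3] (order may vary: map iteration)
}
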
// This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects
// "a/b/c", "a/b" and "a" do not exist.
@@ -66,7 +140,7 @@ func (er erasureObjects) isObject(ctx context.Context, bucket, prefix string) (o
				return errDiskNotFound
			}
			// Check if 'prefix' is an object on this 'disk', else continue the check the next disk
-			return storageDisks[index].CheckFile(bucket, prefix)
+			return storageDisks[index].CheckFile(ctx, bucket, prefix)
		}, index)
	}

@@ -32,6 +32,7 @@ func TestErasureParentDirIsObject(t *testing.T) {
	if err != nil {
		t.Fatalf("Unable to initialize 'Erasure' object layer.")
	}
+	defer obj.Shutdown(context.Background())

	// Remove all disks.
	for _, disk := range fsDisks {
Some files were not shown because too many files have changed in this diff.