Merge from 'master'

Minio Trusted
2017-05-04 17:54:34 -07:00
760 changed files with 160717 additions and 10819 deletions


@@ -26,8 +26,8 @@
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used:
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Operating System and version:
* Operating System and version (`uname -a`):
* Link to your project:

.jshintrc Normal file

@@ -0,0 +1,4 @@
{
"asi": true,
"esnext": true
}


@@ -15,5 +15,13 @@ RUN \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
ENTRYPOINT ["minio"]
COPY buildscripts/docker-entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/export"]
CMD ["minio"]


@@ -15,5 +15,13 @@ RUN \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
ENTRYPOINT ["minio"]
COPY buildscripts/docker-entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/export"]
CMD ["minio"]


@@ -15,5 +15,13 @@ RUN \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
ENTRYPOINT ["minio"]
COPY buildscripts/docker-entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/export"]
CMD ["minio"]


@@ -56,42 +56,55 @@ endif
all: install
checks:
@echo "Checking deps:"
@echo -n "Check deps: "
@(env bash $(PWD)/buildscripts/checkdeps.sh)
@echo "Done."
@echo -n "Checking project is in GOPATH: "
@(env bash $(PWD)/buildscripts/checkgopath.sh)
@echo "Done."
getdeps: checks
@echo "Installing golint:" && go get -u github.com/golang/lint/golint
@echo "Installing gocyclo:" && go get -u github.com/fzipp/gocyclo
@echo "Installing deadcode:" && go get -u github.com/remyoudompheng/go-misc/deadcode
@echo "Installing misspell:" && go get -u github.com/client9/misspell/cmd/misspell
@echo "Installing ineffassign:" && go get -u github.com/gordonklaus/ineffassign
@echo -n "Installing golint: " && go get -u github.com/golang/lint/golint
@echo "Done."
@echo -n "Installing gocyclo: " && go get -u github.com/fzipp/gocyclo
@echo "Done."
@echo -n "Installing deadcode: " && go get -u github.com/remyoudompheng/go-misc/deadcode
@echo "Done."
@echo -n "Installing misspell: " && go get -u github.com/client9/misspell/cmd/misspell
@echo "Done."
@echo -n "Installing ineffassign: " && go get -u github.com/gordonklaus/ineffassign
@echo "Done."
verifiers: vet fmt lint cyclo spelling
vet:
@echo "Running $@:"
@go vet github.com/minio/minio/cmd/...
@go vet github.com/minio/minio/pkg/...
@echo -n "Running $@: "
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
@echo "Done."
fmt:
@echo "Running $@:"
@echo -n "Running $@: "
@gofmt -s -l cmd
@gofmt -s -l pkg
@echo "Done."
lint:
@echo "Running $@:"
@echo -n "Running $@: "
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
@echo "Done."
ineffassign:
@echo "Running $@:"
@echo -n "Running $@: "
@${GOPATH}/bin/ineffassign .
@echo "Done."
cyclo:
@echo "Running $@:"
@echo -n "Running $@: "
@${GOPATH}/bin/gocyclo -over 100 cmd
@${GOPATH}/bin/gocyclo -over 100 pkg
@echo "Done."
build: getdeps verifiers $(UI_ASSETS)
@@ -104,18 +117,21 @@ spelling:
@${GOPATH}/bin/misspell -error `find docs/`
test: build
@echo "Running all minio testing:"
@echo -n "Running all minio testing: "
@go test $(GOFLAGS) .
@go test $(GOFLAGS) github.com/minio/minio/cmd...
@go test $(GOFLAGS) github.com/minio/minio/pkg...
@echo "Done."
coverage: build
@echo "Running all coverage for minio:"
@echo -n "Running all coverage for minio: "
@./buildscripts/go-coverage.sh
@echo "Done."
gomake-all: build
@echo "Installing minio:"
@echo -n "Installing minio at $(GOPATH)/bin/minio: "
@go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio
@echo "Done."
pkg-add:
${GOPATH}/bin/govendor add $(PKG)
@@ -138,7 +154,8 @@ experimental: verifiers
@MINIO_RELEASE=EXPERIMENTAL ./buildscripts/build.sh
clean:
@echo "Cleaning up all the generated files:"
@echo -n "Cleaning up all the generated files: "
@find . -name '*.test' | xargs rm -fv
@rm -rf build
@rm -rf release
@echo "Done."


@@ -26,6 +26,7 @@ Install minio packages using [Homebrew](http://brew.sh/)
brew install minio/stable/minio
minio server ~/Photos
```
Note: If you are upgrading minio on macOS, please see instructions [here](https://github.com/minio/minio/blob/master/docs/minio_homebrew.md).
### Binary Download
| Platform| Architecture | URL|
@@ -40,11 +41,11 @@ chmod 755 minio
### Binary Download
| Platform| Architecture | URL|
| ----------| -------- | ------|
|GNU/Linux|64-bit Intel|https://dl.minio.io/server/minio/release/linux-amd64/minio|
||32-bit Intel|https://dl.minio.io/server/minio/release/linux-386/minio|
||32-bit ARM|https://dl.minio.io/server/minio/release/linux-arm/minio|
||64-bit ARM|https://dl.minio.io/server/minio/release/linux-arm64/minio|
||32-bit ARMv6|https://dl.minio.io/server/minio/release/linux-arm6vl/minio|
|GNU/Linux|64-bit Intel|https://dl.minio.io/server/minio/release/linux-amd64/minio |
| |32-bit Intel|https://dl.minio.io/server/minio/release/linux-386/minio |
| |32-bit ARM|https://dl.minio.io/server/minio/release/linux-arm/minio |
| |64-bit ARM|https://dl.minio.io/server/minio/release/linux-arm64/minio |
| |32-bit ARMv6|https://dl.minio.io/server/minio/release/linux-arm6vl/minio |
```sh
chmod +x minio
./minio server ~/Photos
@@ -54,32 +55,38 @@ chmod +x minio
### Binary Download
| Platform| Architecture | URL|
| ----------| -------- | ------|
|Microsoft Windows|64-bit|https://dl.minio.io/server/minio/release/windows-amd64/minio.exe|
||32-bit|https://dl.minio.io/server/minio/release/windows-386/minio.exe|
|Microsoft Windows|64-bit|https://dl.minio.io/server/minio/release/windows-amd64/minio.exe |
| |32-bit|https://dl.minio.io/server/minio/release/windows-386/minio.exe |
```sh
minio.exe server D:\Photos
```
## FreeBSD
### Port
Install minio packages using [pkg](https://github.com/freebsd/pkg)
```sh
pkg install minio
sysrc minio_enable=yes
sysrc minio_disks=/home/user/Photos
service minio start
```
### Binary Download
| Platform| Architecture | URL|
| ----------| -------- | ------|
|FreeBSD|64-bit|https://dl.minio.io/server/minio/release/freebsd-amd64/minio|
|FreeBSD|64-bit|https://dl.minio.io/server/minio/release/freebsd-amd64/minio |
```sh
chmod 755 minio
./minio server ~/Photos
```
You can run Minio on FreeBSD with FreeNAS as the storage backend; see [here](https://docs.minio.io/docs/how-to-run-minio-in-freenas) for more details.
## Install from Source
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://docs.minio.io/docs/how-to-install-golang).
```sh
go get -u github.com/minio/minio
```
## Test using Minio Browser


@@ -1,8 +1,9 @@
{
"presets": [
"es2015",
"react"
],
"plugins": ["transform-object-rest-spread"]
}
"presets": [
"es2015",
"react"
],
"plugins": [
"transform-object-rest-spread"
]
}


@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import 'babel-polyfill'
import './less/main.less'
@@ -44,7 +45,6 @@ import Web from './js/web'
window.Web = Web
import storage from 'local-storage-fallback'
const store = applyMiddleware(thunkMiddleware)(createStore)(reducer)
const Browse = connect(state => state)(_Browse)
const Login = connect(state => state)(_Login)


@@ -26,6 +26,8 @@ export const SET_BUCKETS = 'SET_BUCKETS'
export const ADD_BUCKET = 'ADD_BUCKET'
export const SET_VISIBLE_BUCKETS = 'SET_VISIBLE_BUCKETS'
export const SET_OBJECTS = 'SET_OBJECTS'
export const APPEND_OBJECTS = 'APPEND_OBJECTS'
export const RESET_OBJECTS = 'RESET_OBJECTS'
export const SET_STORAGE_INFO = 'SET_STORAGE_INFO'
export const SET_SERVER_INFO = 'SET_SERVER_INFO'
export const SHOW_MAKEBUCKET_MODAL = 'SHOW_MAKEBUCKET_MODAL'
@@ -240,15 +242,28 @@ export const setVisibleBuckets = visibleBuckets => {
}
}
export const setObjects = (objects, marker, istruncated) => {
const appendObjects = (objects, marker, istruncated) => {
return {
type: SET_OBJECTS,
type: APPEND_OBJECTS,
objects,
marker,
istruncated
}
}
export const setObjects = (objects) => {
return {
type: SET_OBJECTS,
objects,
}
}
export const resetObjects = () => {
return {
type: RESET_OBJECTS
}
}
export const setCurrentBucket = currentBucket => {
return {
type: SET_CURRENT_BUCKET,
@@ -316,7 +331,7 @@ export const listObjects = () => {
object.name = object.name.replace(`${currentPath}`, '');
return object
})
dispatch(setObjects(objects, res.nextmarker, res.istruncated))
dispatch(appendObjects(objects, res.nextmarker, res.istruncated))
dispatch(setPrefixWritable(res.writable))
dispatch(setLoadBucket(''))
dispatch(setLoadPath(''))
@@ -337,7 +352,7 @@ export const listObjects = () => {
export const selectPrefix = prefix => {
return (dispatch, getState) => {
const {currentBucket, web} = getState()
dispatch(setObjects([], "", false))
dispatch(resetObjects())
dispatch(setLoadPath(prefix))
web.ListObjects({
bucketName: currentBucket,
@@ -352,7 +367,7 @@ export const selectPrefix = prefix => {
object.name = object.name.replace(`${prefix}`, '');
return object
})
dispatch(setObjects(
dispatch(appendObjects(
objects,
res.nextmarker,
res.istruncated


@@ -227,7 +227,12 @@ export default class Browse extends React.Component {
removeObject() {
const {web, dispatch, currentPath, currentBucket, deleteConfirmation, checkedObjects} = this.props
let objects = checkedObjects.length > 0 ? checkedObjects : [deleteConfirmation.object]
let objects = []
if (checkedObjects.length > 0) {
objects = checkedObjects.map(obj => `${currentPath}${obj}`)
} else {
objects = [deleteConfirmation.object]
}
web.RemoveObject({
bucketname: currentBucket,
@@ -464,11 +469,11 @@ export default class Browse extends React.Component {
</div>
<ul>
<li>
Used:
<span>Used: </span>
{ humanize.filesize(total - free) }
</li>
<li className="pull-right">
Free:
<span>Free: </span>
{ humanize.filesize(total - used) }
</li>
</ul>
@@ -721,7 +726,7 @@ export default class Browse extends React.Component {
</label>
<div className="set-expire">
<div className="set-expire-item">
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireDays', 1, shareObject.object) }></i>
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireDays', 1, shareObject.object) }></i>
<div className="set-expire-title">
Days
</div>
@@ -732,10 +737,10 @@ export default class Browse extends React.Component {
max={ 7 }
defaultValue={ 5 } />
</div>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireDays', -1, shareObject.object) }></i>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireDays', -1, shareObject.object) }></i>
</div>
<div className="set-expire-item">
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireHours', 1, shareObject.object) }></i>
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireHours', 1, shareObject.object) }></i>
<div className="set-expire-title">
Hours
</div>
@@ -746,10 +751,10 @@ export default class Browse extends React.Component {
max={ 23 }
defaultValue={ 0 } />
</div>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireHours', -1, shareObject.object) }></i>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireHours', -1, shareObject.object) }></i>
</div>
<div className="set-expire-item">
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireMins', 1, shareObject.object) }></i>
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireMins', 1, shareObject.object) }></i>
<div className="set-expire-title">
Minutes
</div>
@@ -760,9 +765,9 @@ export default class Browse extends React.Component {
max={ 59 }
defaultValue={ 0 } />
</div>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireMins', -1, shareObject.object) }></i>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireMins', -1, shareObject.object) }></i>
</div>
</div>
</div>
</div>
</ModalBody>
<div className="modal-footer">


@@ -89,10 +89,6 @@ export default class Login extends React.Component {
{ alertBox }
<div className="l-wrap">
<form onSubmit={ this.handleSubmit.bind(this) }>
<input name="fixBrowser"
autoComplete="username"
type="text"
style={ { display: 'none' } } />
<InputGroup className="ig-dark"
label="Access Key"
id="accessKey"
@@ -102,7 +98,6 @@ export default class Login extends React.Component {
required="required"
autoComplete="username">
</InputGroup>
<input type="text" autoComplete="new-password" style={ { display: 'none' } } />
<InputGroup className="ig-dark"
label="Secret Key"
id="secretKey"


@@ -77,16 +77,18 @@ export default (state = {
case actions.SET_CURRENT_BUCKET:
newState.currentBucket = action.currentBucket
break
case actions.APPEND_OBJECTS:
newState.objects = [...newState.objects, ...action.objects]
newState.marker = action.marker
newState.istruncated = action.istruncated
break
case actions.SET_OBJECTS:
if (!action.objects.length) {
newState.objects = []
newState.marker = ""
newState.istruncated = action.istruncated
} else {
newState.objects = [...action.objects]
newState.marker = action.marker
newState.istruncated = action.istruncated
}
newState.objects = [...action.objects]
break
case actions.RESET_OBJECTS:
newState.objects = []
newState.marker = ""
newState.istruncated = false
break
case actions.SET_CURRENT_PATH:
newState.currentPath = action.currentPath
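Taken together with the actions diff above, the browser's listing state now has three distinct transitions: append a page of results (APPEND_OBJECTS), replace the list wholesale (SET_OBJECTS), and clear everything before listing a new prefix (RESET_OBJECTS). A minimal sketch of the same bookkeeping, written in Go purely for illustration (names and types hypothetical, not part of this change):

```go
package main

import "fmt"

type objectsState struct {
	objects     []string
	marker      string
	istruncated bool
}

// appendObjects adds one page of listing results (APPEND_OBJECTS).
func (s *objectsState) appendObjects(page []string, marker string, truncated bool) {
	s.objects = append(s.objects, page...)
	s.marker = marker
	s.istruncated = truncated
}

// setObjects replaces the list wholesale (SET_OBJECTS).
func (s *objectsState) setObjects(objects []string) {
	s.objects = append([]string(nil), objects...)
}

// reset clears everything before a new prefix is listed (RESET_OBJECTS).
func (s *objectsState) reset() {
	s.objects = nil
	s.marker = ""
	s.istruncated = false
}

func main() {
	var s objectsState
	s.appendObjects([]string{"a.txt", "b.txt"}, "b.txt", true)
	s.appendObjects([]string{"c.txt"}, "", false)
	fmt.Println(s.objects) // [a.txt b.txt c.txt]
	s.setObjects([]string{"x.txt"})
	fmt.Println(s.objects) // [x.txt]
	s.reset()
	fmt.Println(len(s.objects)) // 0
}
```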


@@ -81,7 +81,7 @@ select.form-control {
width: 100%;
height: 40px;
border: 0;
background: transparent;
background: transparent !important;
text-align: center;
position: relative;
z-index: 1;
@@ -114,8 +114,8 @@ select.form-control {
.ig-dark {
.ig-text {
color: @white;
border-color: rgba(255,255,255,0.1);
color: @white !important;
border-color: rgba(255,255,255,0.1) !important;
}
.ig-helpers {

File diff suppressed because one or more lines are too long


@@ -22,7 +22,6 @@ var purify = require("purifycss-webpack-plugin")
var exports = {
context: __dirname,
entry: [
"babel-polyfill",
path.resolve(__dirname, 'app/index.js')
],
output: {
@@ -101,7 +100,6 @@ var exports = {
if (process.env.NODE_ENV === 'dev') {
exports.entry = [
"babel-polyfill",
'webpack/hot/dev-server',
'webpack-dev-server/client?http://localhost:8080',
path.resolve(__dirname, 'app/index.js')


@@ -1,10 +1,12 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
abbrev@1:
version "1.0.9"
resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
accepts@~1.3.3, accepts@1.3.3:
accepts@1.3.3, accepts@~1.3.3:
version "1.3.3"
resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
dependencies:
@@ -591,6 +593,14 @@ babel-polyfill@^6.22.0:
core-js "^2.4.0"
regenerator-runtime "^0.10.0"
babel-polyfill@^6.23.0:
version "6.23.0"
resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.23.0.tgz#8364ca62df8eafb830499f699177466c3b03499d"
dependencies:
babel-runtime "^6.22.0"
core-js "^2.4.0"
regenerator-runtime "^0.10.0"
babel-preset-es2015@^6.14.0:
version "6.22.0"
resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.22.0.tgz#af5a98ecb35eb8af764ad8a5a05eb36dc4386835"
@@ -690,14 +700,14 @@ babel-types@^6.19.0, babel-types@^6.22.0:
lodash "^4.2.0"
to-fast-properties "^1.0.1"
babylon@^6.11.0, babylon@^6.15.0, babylon@^6.8.0:
version "6.15.0"
resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.15.0.tgz#ba65cfa1a80e1759b0e89fb562e27dccae70348e"
babylon@6.14.1:
version "6.14.1"
resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.14.1.tgz#956275fab72753ad9b3435d7afe58f8bf0a29815"
babylon@^6.11.0, babylon@^6.15.0, babylon@^6.8.0:
version "6.15.0"
resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.15.0.tgz#ba65cfa1a80e1759b0e89fb562e27dccae70348e"
backo2@1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
@@ -722,7 +732,7 @@ base64id@1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6"
batch@^0.5.3, batch@0.5.3:
batch@0.5.3, batch@^0.5.3:
version "0.5.3"
resolved "https://registry.yarnpkg.com/batch/-/batch-0.5.3.tgz#3f3414f380321743bfc1042f9a83ff1d5824d464"
@@ -936,7 +946,7 @@ classnames@^2.1.5, classnames@^2.2.3, classnames@^2.2.5:
version "2.2.5"
resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.5.tgz#fb3801d453467649ef3603c7d61a02bd129bde6d"
clean-css@^3.2.10, clean-css@3.4.x:
clean-css@3.4.x, clean-css@^3.2.10:
version "3.4.24"
resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.24.tgz#89f5a5e9da37ae02394fe049a41388abbe72c3b5"
dependencies:
@@ -1015,12 +1025,6 @@ combined-stream@^1.0.5, combined-stream@~1.0.5:
dependencies:
delayed-stream "~1.0.0"
commander@^2.2.0, commander@^2.5.0, commander@^2.8.1, commander@^2.9.0, commander@2.9.x:
version "2.9.0"
resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
dependencies:
graceful-readlink ">= 1.0.0"
commander@0.6.1:
version "0.6.1"
resolved "https://registry.yarnpkg.com/commander/-/commander-0.6.1.tgz#fa68a14f6a945d54dbbe50d8cdb3320e9e3b1a06"
@@ -1035,6 +1039,12 @@ commander@2.8.x:
dependencies:
graceful-readlink ">= 1.0.0"
commander@2.9.x, commander@^2.2.0, commander@^2.5.0, commander@^2.8.1, commander@^2.9.0:
version "2.9.0"
resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
dependencies:
graceful-readlink ">= 1.0.0"
commondir@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b"
@@ -1057,14 +1067,14 @@ component-bind@1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
component-emitter@~1.2.0, component-emitter@1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
component-emitter@1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
component-emitter@1.2.1, component-emitter@~1.2.0:
version "1.2.1"
resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
component-inherit@0.0.3:
version "0.0.3"
resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
@@ -1144,7 +1154,7 @@ cookie-signature@1.0.6:
version "1.0.6"
resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
cookie@^0.3.1, cookie@0.3.1:
cookie@0.3.1, cookie@^0.3.1:
version "0.3.1"
resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
@@ -1313,17 +1323,13 @@ date-now@^0.1.4:
version "0.1.4"
resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
debug@^0.7.4:
version "0.7.4"
resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@2, debug@2.6.0:
debug@2, debug@2.6.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0:
version "2.6.0"
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.0.tgz#bc596bcabe7617f11d9fa15361eded5608b8499b"
dependencies:
ms "0.7.2"
debug@~2.2.0, debug@2.2.0:
debug@2.2.0, debug@~2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
dependencies:
@@ -1335,6 +1341,10 @@ debug@2.3.3:
dependencies:
ms "0.7.2"
debug@^0.7.4:
version "0.7.4"
resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2:
version "1.2.0"
resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
@@ -1391,7 +1401,7 @@ di@^0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c"
diff@^1.3.2, diff@1.4.0:
diff@1.4.0, diff@^1.3.2:
version "1.4.0"
resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
@@ -1440,14 +1450,14 @@ domain-browser@^1.1.1:
version "1.1.7"
resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.1.7.tgz#867aa4b093faa05f1de08c06f4d7b21fdf8698bc"
domelementtype@~1.1.1:
version "1.1.3"
resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
domelementtype@1:
version "1.3.0"
resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
domelementtype@~1.1.1:
version "1.1.3"
resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
domhandler@2.1:
version "2.1.0"
resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.1.0.tgz#d2646f5e57f6c3bab11cf6cb05d3c0acf7412594"
@@ -1587,7 +1597,7 @@ escape-html@~1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
escape-string-regexp@^1.0.2, escape-string-regexp@1.0.2:
escape-string-regexp@1.0.2, escape-string-regexp@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1"
@@ -1754,7 +1764,7 @@ express@^4.13.3:
utils-merge "1.0.0"
vary "~1.1.0"
extend@^3.0.0, extend@~3.0.0, extend@3.0.0:
extend@3.0.0, extend@^3.0.0, extend@~3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.0.tgz#5a474353b9f3353ddd8176dfd37b91c83a46f1d4"
@@ -1859,14 +1869,6 @@ forever-agent@~0.6.1:
version "0.6.1"
resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
form-data@~2.1.1:
version "2.1.2"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.2.tgz#89c3534008b97eada4cbb157d58f6f5df025eae4"
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.5"
mime-types "^2.1.12"
form-data@1.0.0-rc3:
version "1.0.0-rc3"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.0-rc3.tgz#d35bc62e7fbc2937ae78f948aaa0d38d90607577"
@@ -1875,6 +1877,14 @@ form-data@1.0.0-rc3:
combined-stream "^1.0.5"
mime-types "^2.1.3"
form-data@~2.1.1:
version "2.1.2"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.2.tgz#89c3534008b97eada4cbb157d58f6f5df025eae4"
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.5"
mime-types "^2.1.12"
formidable@~1.0.14:
version "1.0.17"
resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.17.tgz#ef5491490f9433b705faa77249c99029ae348559"
@@ -1978,6 +1988,13 @@ glob-parent@^2.0.0:
dependencies:
is-glob "^2.0.0"
glob@3.2.11:
version "3.2.11"
resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d"
dependencies:
inherits "2"
minimatch "0.3"
glob@^5.0.15:
version "5.0.15"
resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
@@ -2009,13 +2026,6 @@ glob@^7.0.0, glob@^7.0.5:
once "^1.3.0"
path-is-absolute "^1.0.0"
glob@3.2.11:
version "3.2.11"
resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d"
dependencies:
inherits "2"
minimatch "0.3"
globals@^9.0.0:
version "9.14.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-9.14.0.tgz#8859936af0038741263053b39d0e76ca241e4034"
@@ -2194,7 +2204,7 @@ humanize@0.0.9:
version "0.0.9"
resolved "https://registry.yarnpkg.com/humanize/-/humanize-0.0.9.tgz#1994ffaecdfe9c441ed2bdac7452b7bb4c9e41a4"
iconv-lite@^0.4.5, iconv-lite@0.4.15:
iconv-lite@0.4.15, iconv-lite@^0.4.5:
version "0.4.15"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb"
@@ -2225,7 +2235,7 @@ inflight@^1.0.4:
once "^1.3.0"
wrappy "1"
inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1, inherits@2, inherits@2.0.3:
inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1:
version "2.0.3"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
@@ -2418,14 +2428,14 @@ is-typedarray@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
isarray@^1.0.0, isarray@~1.0.0, isarray@1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
isarray@0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
isbinaryfile@^3.0.0:
version "3.0.2"
resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621"
@@ -2505,7 +2515,7 @@ json-stringify-safe@~5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
json3@^3.3.2, json3@3.3.2:
json3@3.3.2, json3@^3.3.2:
version "3.3.2"
resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
@@ -2633,7 +2643,7 @@ less@^2.7.1:
request "^2.72.0"
source-map "^0.5.3"
loader-utils@^0.2.11, loader-utils@^0.2.16, loader-utils@^0.2.5, loader-utils@^0.2.7, loader-utils@~0.2.2, loader-utils@0.2.x:
loader-utils@0.2.x, loader-utils@^0.2.11, loader-utils@^0.2.16, loader-utils@^0.2.5, loader-utils@^0.2.7, loader-utils@~0.2.2:
version "0.2.16"
resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-0.2.16.tgz#f08632066ed8282835dff88dfb52704765adee6d"
dependencies:
@@ -2726,20 +2736,16 @@ lower-case@^1.1.1:
version "1.1.3"
resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.3.tgz#c92393d976793eee5ba4edb583cf8eae35bd9bfb"
lru-cache@2, lru-cache@2.2.x:
version "2.2.4"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d"
lru-cache@^3.2.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-3.2.0.tgz#71789b3b7f5399bec8565dda38aa30d2a097efee"
dependencies:
pseudomap "^1.0.1"
lru-cache@2:
version "2.7.3"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
lru-cache@2.2.x:
version "2.2.4"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d"
macaddress@^0.2.8:
version "0.2.8"
resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.8.tgz#5904dc537c39ec6dbefeae902327135fa8511f12"
@@ -2798,7 +2804,7 @@ micromatch@^2.1.5, micromatch@^2.3.11:
parse-glob "^3.0.4"
regex-cache "^0.4.2"
mime-db@^1.25.0, "mime-db@>= 1.24.0 < 2", mime-db@~1.26.0:
"mime-db@>= 1.24.0 < 2", mime-db@^1.25.0, mime-db@~1.26.0:
version "1.26.0"
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.26.0.tgz#eaffcd0e4fc6935cf8134da246e2e6c35305adff"
@@ -2808,19 +2814,13 @@ mime-types@^2.1.12, mime-types@^2.1.13, mime-types@^2.1.3, mime-types@~2.1.11, m
dependencies:
mime-db "~1.26.0"
mime@^1.2.11, mime@^1.3.4, mime@1.3.4:
version "1.3.4"
resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
mime@1.2.x:
version "1.2.11"
resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10"
minimatch@^3.0.0, minimatch@^3.0.2, "minimatch@2 || 3":
version "3.0.3"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
dependencies:
brace-expansion "^1.0.0"
mime@1.3.4, mime@^1.2.11, mime@^1.3.4:
version "1.3.4"
resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
minimatch@0.3:
version "0.3.0"
@@ -2829,24 +2829,30 @@ minimatch@0.3:
lru-cache "2"
sigmund "~1.0.0"
"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2:
version "3.0.3"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
dependencies:
brace-expansion "^1.0.0"
minimist@0.0.8, minimist@~0.0.1:
version "0.0.8"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
minimist@^1.1.1, minimist@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
minimist@~0.0.1, minimist@0.0.8:
version "0.0.8"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
mkdirp@0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e"
mkdirp@^0.5.0, mkdirp@^0.5.1, "mkdirp@>=0.5 0", mkdirp@~0.5.0, mkdirp@~0.5.1, mkdirp@0.5.1:
mkdirp@0.5.1, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
version "0.5.1"
resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
dependencies:
minimist "0.0.8"
mkdirp@0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e"
mocha@^2.5.3:
version "2.5.3"
resolved "https://registry.yarnpkg.com/mocha/-/mocha-2.5.3.tgz#161be5bdeb496771eb9b35745050b622b5aefc58"
@@ -2866,14 +2872,14 @@ moment@^2.15.1:
version "2.17.1"
resolved "https://registry.yarnpkg.com/moment/-/moment-2.17.1.tgz#fed9506063f36b10f066c8b59a144d7faebe1d82"
mout@^0.11.0:
version "0.11.1"
resolved "https://registry.yarnpkg.com/mout/-/mout-0.11.1.tgz#ba3611df5f0e5b1ffbfd01166b8f02d1f5fa2b99"
"mout@>=0.9 <2.0":
version "1.0.0"
resolved "https://registry.yarnpkg.com/mout/-/mout-1.0.0.tgz#9bdf1d4af57d66d47cb353a6335a3281098e1501"
mout@^0.11.0:
version "0.11.1"
resolved "https://registry.yarnpkg.com/mout/-/mout-0.11.1.tgz#ba3611df5f0e5b1ffbfd01166b8f02d1f5fa2b99"
ms@0.7.1:
version "0.7.1"
resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
@@ -3028,6 +3034,10 @@ oauth-sign@~0.8.1:
version "0.8.2"
resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
object-assign@4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
object-assign@^2.0.0:
version "2.1.1"
resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
@@ -3036,10 +3046,6 @@ object-assign@^4.0.1, object-assign@^4.1.0:
version "4.1.1"
resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
object-assign@4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
object-component@0.0.3:
version "0.0.3"
resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
@@ -3518,14 +3524,14 @@ pseudomap@^1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
punycode@^1.2.4, punycode@^1.4.1:
version "1.4.1"
resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
punycode@1.3.2:
version "1.3.2"
resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d"
punycode@^1.2.4, punycode@^1.4.1:
version "1.4.1"
resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
purify-css@^1.1.1:
version "1.1.9"
resolved "https://registry.yarnpkg.com/purify-css/-/purify-css-1.1.9.tgz#46c9acd8940f3076c0c346c027e286f996168357"
@@ -3548,10 +3554,6 @@ q@^1.1.2:
version "1.4.1"
resolved "https://registry.yarnpkg.com/q/-/q-1.4.1.tgz#55705bcd93c5f3673530c2c2cbc0c2b3addc286e"
qs@~6.3.0:
version "6.3.0"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.3.0.tgz#f403b264f23bc01228c74131b407f18d5ea5d442"
qs@2.3.3:
version "2.3.3"
resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404"
@@ -3564,6 +3566,10 @@ qs@6.2.1:
version "6.2.1"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.2.1.tgz#ce03c5ff0935bc1d9d69a9f14cbd18e568d67625"
qs@~6.3.0:
version "6.3.0"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.3.0.tgz#f403b264f23bc01228c74131b407f18d5ea5d442"
query-string@^3.0.0:
version "3.0.3"
resolved "https://registry.yarnpkg.com/query-string/-/query-string-3.0.3.tgz#ae2e14b4d05071d4e9b9eb4873c35b0dcd42e638"
@@ -3720,6 +3726,24 @@ react@^0.14.8:
envify "^3.0.0"
fbjs "^0.6.1"
readable-stream@1.0, readable-stream@~1.0.2:
version "1.0.34"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
dependencies:
core-util-is "~1.0.0"
inherits "~2.0.1"
isarray "0.0.1"
string_decoder "~0.10.x"
readable-stream@1.0.27-1:
version "1.0.27-1"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.27-1.tgz#6b67983c20357cefd07f0165001a16d710d91078"
dependencies:
core-util-is "~1.0.0"
inherits "~2.0.1"
isarray "0.0.1"
string_decoder "~0.10.x"
"readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.5, readable-stream@^2.1.0:
version "2.2.2"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.2.tgz#a9e6fec3c7dda85f8bb1b3ba7028604556fc825e"
@@ -3732,15 +3756,6 @@ react@^0.14.8:
string_decoder "~0.10.x"
util-deprecate "~1.0.1"
readable-stream@~1.0.2, readable-stream@1.0:
version "1.0.34"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
dependencies:
core-util-is "~1.0.0"
inherits "~2.0.1"
isarray "0.0.1"
string_decoder "~0.10.x"
readable-stream@~2.1.4:
version "2.1.5"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.1.5.tgz#66fa8b720e1438b364681f2ad1a63c618448c9d0"
@@ -3753,15 +3768,6 @@ readable-stream@~2.1.4:
string_decoder "~0.10.x"
util-deprecate "~1.0.1"
readable-stream@1.0.27-1:
version "1.0.27-1"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.27-1.tgz#6b67983c20357cefd07f0165001a16d710d91078"
dependencies:
core-util-is "~1.0.0"
inherits "~2.0.1"
isarray "0.0.1"
string_decoder "~0.10.x"
readdirp@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78"
@@ -3948,7 +3954,7 @@ right-align@^0.1.1:
dependencies:
align-text "^0.1.1"
rimraf@^2.3.3, rimraf@~2.5.1, rimraf@~2.5.4, rimraf@2:
rimraf@2, rimraf@^2.3.3, rimraf@~2.5.1, rimraf@~2.5.4:
version "2.5.4"
resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04"
dependencies:
@@ -3990,7 +3996,7 @@ rocambole-whitespace@^1.0.0:
repeat-string "^1.5.0"
rocambole-token "^1.2.1"
rocambole@^0.7.0, "rocambole@>=0.7 <2.0":
"rocambole@>=0.7 <2.0", rocambole@^0.7.0:
version "0.7.0"
resolved "https://registry.yarnpkg.com/rocambole/-/rocambole-0.7.0.tgz#f6c79505517dc42b6fb840842b8b953b0f968585"
dependencies:
@@ -4208,28 +4214,28 @@ source-map-url@~0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
source-map@0.1.34:
version "0.1.34"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b"
dependencies:
amdefine ">=0.0.4"
source-map@0.4.x, source-map@^0.4.2, source-map@~0.4.1:
version "0.4.4"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
dependencies:
amdefine ">=0.0.4"
source-map@^0.1.38, source-map@^0.1.41:
version "0.1.43"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
dependencies:
amdefine ">=0.0.4"
source-map@^0.4.2, source-map@~0.4.1, source-map@0.4.x:
version "0.4.4"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
dependencies:
amdefine ">=0.0.4"
source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1, source-map@~0.5.3:
version "0.5.6"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
source-map@0.1.34:
version "0.1.34"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b"
dependencies:
amdefine ">=0.0.4"
sprintf-js@~1.0.2:
version "1.0.3"
resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
@@ -4282,10 +4288,6 @@ strict-uri-encode@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713"
string_decoder@^0.10.25, string_decoder@~0.10.x:
version "0.10.31"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
string-width@^1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
@@ -4294,6 +4296,10 @@ string-width@^1.0.1:
is-fullwidth-code-point "^1.0.0"
strip-ansi "^3.0.0"
string_decoder@^0.10.25, string_decoder@~0.10.x:
version "0.10.31"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
stringstream@~0.0.4:
version "0.0.5"
resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
@@ -4338,6 +4344,10 @@ superagent@^1.8.4:
readable-stream "1.0.27-1"
reduce-component "1.0.1"
supports-color@1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.2.0.tgz#ff1ed1e61169d06b3cf2d588e188b18d8847e17e"
supports-color@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
@@ -4356,10 +4366,6 @@ supports-color@^3.1.0, supports-color@^3.1.1, supports-color@^3.2.3:
dependencies:
has-flag "^1.0.0"
supports-color@1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.2.0.tgz#ff1ed1e61169d06b3cf2d588e188b18d8847e17e"
svgo@^0.7.0:
version "0.7.1"
resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.1.tgz#287320fed972cb097e72c2bb1685f96fe08f8034"
@@ -4488,7 +4494,7 @@ ua-parser-js@^0.7.9:
version "0.7.12"
resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.12.tgz#04c81a99bdd5dc52263ea29d24c6bf8d4818a4bb"
uglify-js@~2.7.3, uglify-js@2.7.x:
uglify-js@2.7.x, uglify-js@~2.7.3:
version "2.7.5"
resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.7.5.tgz#4612c0c7baaee2ba7c487de4904ae122079f2ca8"
dependencies:
@@ -4538,7 +4544,7 @@ uniqs@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02"
unpipe@~1.0.0, unpipe@1.0.0:
unpipe@1.0.0, unpipe@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
@@ -4557,16 +4563,16 @@ url-loader@^0.5.7:
loader-utils "0.2.x"
mime "1.2.x"
url-parse@^1.1.1:
version "1.1.7"
resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.1.7.tgz#025cff999653a459ab34232147d89514cc87d74a"
url-parse@1.0.x:
version "1.0.5"
resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.0.5.tgz#0854860422afdcfefeb6c965c662d4800169927b"
dependencies:
querystringify "0.0.x"
requires-port "1.0.x"
url-parse@1.0.x:
version "1.0.5"
resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.0.5.tgz#0854860422afdcfefeb6c965c662d4800169927b"
url-parse@^1.1.1:
version "1.1.7"
resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.1.7.tgz#025cff999653a459ab34232147d89514cc87d74a"
dependencies:
querystringify "0.0.x"
requires-port "1.0.x"
@@ -4598,7 +4604,7 @@ util-deprecate@~1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
util@^0.10.3, util@0.10.3:
util@0.10.3, util@^0.10.3:
version "0.10.3"
resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9"
dependencies:
@@ -4765,22 +4771,22 @@ wide-align@^1.1.0:
dependencies:
string-width "^1.0.1"
window-size@^0.1.4:
version "0.1.4"
resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
window-size@0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
wordwrap@~0.0.2:
version "0.0.3"
resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
window-size@^0.1.4:
version "0.1.4"
resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
wordwrap@0.0.2:
version "0.0.2"
resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
wordwrap@~0.0.2:
version "0.0.3"
resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
wrap-ansi@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
@@ -4847,4 +4853,3 @@ yargs@~3.10.0:
yeast@0.1.2:
version "0.1.2"
resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"


@@ -25,6 +25,11 @@ _init() {
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)
case "${KNAME}" in
SunOS )
ARCH=$(isainfo -k)
;;
esac
}
## FIXME:
@@ -97,7 +102,7 @@ assert_is_supported_arch() {
assert_is_supported_os() {
case "${KNAME}" in
Linux | FreeBSD | OpenBSD | NetBSD | DragonFly )
Linux | FreeBSD | OpenBSD | NetBSD | DragonFly | SunOS )
return
;;
Darwin )


@@ -16,8 +16,6 @@
#
main() {
echo "Checking project is in GOPATH:"
IFS=':' read -r -a paths <<< "$GOPATH"
for path in "${paths[@]}"; do
minio_path="$path/src/github.com/minio/minio"


@@ -0,0 +1,81 @@
#!/bin/sh
#
# Minio Cloud Storage, (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If the command starts with an option, prepend minio.
if [ "${1}" != "minio" ]; then
if [ -n "${1}" ]; then
set -- minio "$@"
fi
fi
# Wait for all the hosts to come online and have
# their DNS entries populated properly.
docker_wait_hosts() {
hosts="$@"
num_hosts=0
# Count number of hosts in arguments.
for host in $hosts; do
[ $(echo "$host" | grep -E "^http") ] || continue
num_hosts=$((num_hosts+1))
done
if [ $num_hosts -gt 0 ]; then
echo -n "Waiting for all hosts to resolve..."
while true; do
x=0
for host in $hosts; do
[ $(echo "$host" | grep -E "^http") ] || continue
# Extract the domain.
host=$(echo $host | sed -e 's/^http[s]\?:\/\/\([^\/]\+\).*/\1/')
echo -n .
val=$(ping -c 1 $host 2>/dev/null)
if [ $? != 0 ]; then
echo "Failed to lookup $host"
continue
fi
x=$((x+1))
done
# All provided hosts resolved successfully; break out.
test $x -eq $num_hosts && break
echo "Failed to resolve hosts.. retrying after 1 second."
sleep 1
done
echo "All hosts are resolving proceeding to initialize Minio."
fi
}
## Look for docker secrets in default documented location.
docker_secrets_env() {
local MINIO_ACCESS_KEY_FILE="/run/secrets/access_key"
local MINIO_SECRET_KEY_FILE="/run/secrets/secret_key"
if [ -f $MINIO_ACCESS_KEY_FILE -a -f $MINIO_SECRET_KEY_FILE ]; then
if [ -f $MINIO_ACCESS_KEY_FILE ]; then
export MINIO_ACCESS_KEY="$(cat "$MINIO_ACCESS_KEY_FILE")"
fi
if [ -f $MINIO_SECRET_KEY_FILE ]; then
export MINIO_SECRET_KEY="$(cat "$MINIO_SECRET_KEY_FILE")"
fi
fi
}
## Set access env from secrets if necessary.
docker_secrets_env
## Wait for all the hosts to come online.
docker_wait_hosts "$@"
exec "$@"


@@ -27,8 +27,8 @@ import (
)
func genLDFlags(version string) string {
var ldflagsStr string
ldflagsStr = "-X github.com/minio/minio/cmd.Version=" + version
ldflagsStr := "-s -w"
ldflagsStr += " -X github.com/minio/minio/cmd.Version=" + version
ldflagsStr += " -X github.com/minio/minio/cmd.ReleaseTag=" + releaseTag(version)
ldflagsStr += " -X github.com/minio/minio/cmd.CommitID=" + commitID()
ldflagsStr += " -X github.com/minio/minio/cmd.ShortCommitID=" + commitID()[:12]


@@ -24,7 +24,9 @@ import (
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"sync"
"time"
)
@@ -49,6 +51,7 @@ const (
mgmtDryRun mgmtQueryKey = "dry-run"
mgmtUploadIDMarker mgmtQueryKey = "upload-id-marker"
mgmtMaxUploads mgmtQueryKey = "max-uploads"
mgmtUploadID mgmtQueryKey = "upload-id"
)
// ServerVersion - server version
@@ -205,14 +208,45 @@ type ServerConnStats struct {
Throughput uint64 `json:"throughput,omitempty"`
}
// ServerInfo holds the information that will be returned by ServerInfo API
type ServerInfo struct {
// ServerHTTPMethodStats holds total number of HTTP operations from/to the server,
// including the average duration the call was spent.
type ServerHTTPMethodStats struct {
Count uint64 `json:"count"`
AvgDuration string `json:"avgDuration"`
}
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
TotalHEADStats ServerHTTPMethodStats `json:"totalHEADs"`
SuccessHEADStats ServerHTTPMethodStats `json:"successHEADs"`
TotalGETStats ServerHTTPMethodStats `json:"totalGETs"`
SuccessGETStats ServerHTTPMethodStats `json:"successGETs"`
TotalPUTStats ServerHTTPMethodStats `json:"totalPUTs"`
SuccessPUTStats ServerHTTPMethodStats `json:"successPUTs"`
TotalPOSTStats ServerHTTPMethodStats `json:"totalPOSTs"`
SuccessPOSTStats ServerHTTPMethodStats `json:"successPOSTs"`
TotalDELETEStats ServerHTTPMethodStats `json:"totalDELETEs"`
SuccessDELETEStats ServerHTTPMethodStats `json:"successDELETEs"`
}
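For reference, each method-stat field marshals through the json tags shown above; a minimal sketch with hypothetical sample values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ServerHTTPMethodStats mirrors the struct in the diff above.
type ServerHTTPMethodStats struct {
	Count       uint64 `json:"count"`
	AvgDuration string `json:"avgDuration"`
}

func main() {
	s := ServerHTTPMethodStats{Count: 42, AvgDuration: "1.2ms"}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"count":42,"avgDuration":"1.2ms"}
}
```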
// ServerInfoData holds storage, connections and other
// information of a given server.
type ServerInfoData struct {
StorageInfo StorageInfo `json:"storage"`
ConnStats ServerConnStats `json:"network"`
HTTPStats ServerHTTPStats `json:"http"`
Properties ServerProperties `json:"server"`
}
// ServerInfoHandler - GET /?server-info
// ServerInfo holds server information result of one node
type ServerInfo struct {
Error error
Addr string
Data *ServerInfoData
}
// ServerInfoHandler - GET /?info
// ----------
// Get server information
func (adminAPI adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
@@ -223,58 +257,43 @@ func (adminAPI adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *htt
return
}
// Build storage info
objLayer := newObjectLayerFn()
if objLayer == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
storage := objLayer.StorageInfo()
// Web service response
reply := make([]ServerInfo, len(globalAdminPeers))
// Build list of enabled ARNs queues
var arns []string
for queueArn := range globalEventNotifier.GetAllExternalTargets() {
arns = append(arns, queueArn)
var wg sync.WaitGroup
// Gather server information for all nodes
for i, p := range globalAdminPeers {
wg.Add(1)
// Gather information from a peer in a goroutine
go func(idx int, peer adminPeer) {
defer wg.Done()
// Initialize server info at index
reply[idx] = ServerInfo{Addr: peer.addr}
serverInfoData, err := peer.cmdRunner.ServerInfoData()
if err != nil {
errorIf(err, "Unable to get server info from %s.", peer.addr)
reply[idx].Error = err
return
}
reply[idx].Data = &serverInfoData
}(i, p)
}
// Fetch uptimes from all peers. This may fail due to lack
// of read-quorum availability.
uptime, err := getPeerUptimes(globalAdminPeers)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
errorIf(err, "Unable to get uptime from majority of servers.")
return
}
// Build server properties information
properties := ServerProperties{
Version: Version,
CommitID: CommitID,
Region: serverConfig.GetRegion(),
SQSARN: arns,
Uptime: uptime,
}
// Build network info
connStats := ServerConnStats{
TotalInputBytes: globalConnStats.getTotalInputBytes(),
TotalOutputBytes: globalConnStats.getTotalOutputBytes(),
}
// Build the whole returned information
info := ServerInfo{
StorageInfo: storage,
ConnStats: connStats,
Properties: properties,
}
wg.Wait()
// Marshal API response
jsonBytes, err := json.Marshal(info)
jsonBytes, err := json.Marshal(reply)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
errorIf(err, "Failed to marshal storage info into json.")
return
}
// Reply with storage information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
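The handler above fans out one goroutine per admin peer and has each goroutine write only its own index of the preallocated `reply` slice, so no mutex is needed before `wg.Wait()`. The same pattern in isolation, as a runnable sketch (peer addresses hypothetical):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	peers := []string{"10.0.0.1:9000", "10.0.0.2:9000", "10.0.0.3:9000"}
	reply := make([]string, len(peers))

	var wg sync.WaitGroup
	for i, p := range peers {
		wg.Add(1)
		// Each goroutine writes a distinct slice index, so there is
		// no data race on reply.
		go func(idx int, addr string) {
			defer wg.Done()
			// Stand-in for peer.cmdRunner.ServerInfoData().
			reply[idx] = "info from " + addr
		}(i, p)
	}
	wg.Wait()

	fmt.Println(reply)
}
```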
@@ -602,6 +621,42 @@ func isDryRun(qval url.Values) bool {
return false
}
// healResult - represents result of a heal operation like
// heal-object, heal-upload.
type healResult struct {
State healState `json:"state"`
}
// healState - different states of heal operation
type healState int
const (
// healNone - none of the disks healed
healNone healState = iota
// healPartial - some disks were healed, others were offline
healPartial
// healOK - all disks were healed
healOK
)
// newHealResult - returns healResult given number of disks healed and
// number of disks offline
func newHealResult(numHealedDisks, numOfflineDisks int) healResult {
var state healState
switch {
case numHealedDisks == 0:
state = healNone
case numOfflineDisks > 0:
state = healPartial
default:
state = healOK
}
return healResult{State: state}
}
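Putting the pieces together: `newHealResult` collapses the disk counts into one of the three states, and the handlers below marshal it as JSON. A small runnable sketch using the types above (sample counts hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Types and constructor copied from the diff above.
type healState int

const (
	healNone healState = iota
	healPartial
	healOK
)

type healResult struct {
	State healState `json:"state"`
}

func newHealResult(numHealedDisks, numOfflineDisks int) healResult {
	var state healState
	switch {
	case numHealedDisks == 0:
		state = healNone
	case numOfflineDisks > 0:
		state = healPartial
	default:
		state = healOK
	}
	return healResult{State: state}
}

func main() {
	// Two disks healed while one stayed offline => partial heal.
	b, _ := json.Marshal(newHealResult(2, 1))
	fmt.Println(string(b)) // {"state":1}
}
```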
// HealObjectHandler - POST /?heal&bucket=mybucket&object=myobject&dry-run
// - x-minio-operation = object
// - bucket and object are both mandatory query parameters
@@ -644,14 +699,95 @@ func (adminAPI adminAPIHandlers) HealObjectHandler(w http.ResponseWriter, r *htt
return
}
err := objLayer.HealObject(bucket, object)
numOfflineDisks, numHealedDisks, err := objLayer.HealObject(bucket, object)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
jsonBytes, err := json.Marshal(newHealResult(numHealedDisks, numOfflineDisks))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Return 200 on success.
writeSuccessResponseHeadersOnly(w)
writeSuccessResponseJSON(w, jsonBytes)
}
// HealUploadHandler - POST /?heal&bucket=mybucket&object=myobject&upload-id=myuploadID&dry-run
// - x-minio-operation = upload
// - bucket, object and upload-id are mandatory query parameters
// Heal a given upload, if present.
func (adminAPI adminAPIHandlers) HealUploadHandler(w http.ResponseWriter, r *http.Request) {
// Get object layer instance.
objLayer := newObjectLayerFn()
if objLayer == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkRequestAuthType(r, "", "", "")
if adminAPIErr != ErrNone {
writeErrorResponse(w, adminAPIErr, r.URL)
return
}
vars := r.URL.Query()
bucket := vars.Get(string(mgmtBucket))
object := vars.Get(string(mgmtObject))
uploadID := vars.Get(string(mgmtUploadID))
uploadObj := path.Join(bucket, object, uploadID)
// Validate bucket and object names as supplied via query
// parameters.
if err := checkBucketAndObjectNames(bucket, object); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Validate the bucket and object w.r.t backend representation
// of an upload.
if err := checkBucketAndObjectNames(minioMetaMultipartBucket,
uploadObj); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Check if upload exists.
if _, err := objLayer.GetObjectInfo(minioMetaMultipartBucket,
uploadObj); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// if dry-run is set in query params then perform validations
// and return success.
if isDryRun(vars) {
writeSuccessResponseHeadersOnly(w)
return
}
// We are able to use HealObject for healing an upload since an
// ongoing upload has the same backend representation as an
// object. The 'object' corresponding to a given bucket,
// object and uploadID is
// .minio.sys/multipart/bucket/object/uploadID.
numOfflineDisks, numHealedDisks, err := objLayer.HealObject(minioMetaMultipartBucket, uploadObj)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
jsonBytes, err := json.Marshal(newHealResult(numHealedDisks, numOfflineDisks))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Return 200 on success.
writeSuccessResponseJSON(w, jsonBytes)
}
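Since an ongoing upload lives under `.minio.sys/multipart/<bucket>/<object>/<uploadID>`, the handler can reuse HealObject by joining those components into one backend object name, as the `path.Join` call above does. A sketch (the uploadID value is hypothetical):

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// The upload's backend key inside the multipart metadata bucket.
	uploadObj := path.Join("mybucket", "myobject", "3cce7e9b-d565-45ff-8d4c-a5a47ae3d9c1")
	fmt.Println(uploadObj)
	// Output: mybucket/myobject/3cce7e9b-d565-45ff-8d4c-a5a47ae3d9c1
}
```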
// HealFormatHandler - POST /?heal&dry-run


@@ -21,10 +21,12 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"testing"
"time"
@@ -154,14 +156,9 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
}
// Initialize boot time
globalBootTime = time.Now().UTC()
globalBootTime = UTCNow()
// Set globalEndpoints for a single node XL setup.
for _, xlDir := range xlDirs {
globalEndpoints = append(globalEndpoints, &url.URL{
Path: xlDir,
})
}
globalEndpoints = mustGetNewEndpointList(xlDirs...)
// Set globalIsXL to indicate that the setup uses an erasure code backend.
globalIsXL = true
@@ -299,14 +296,8 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
// Initialize admin peers to make admin RPC calls. Note: In a
// single node setup, this degenerates to a simple function
// call under the hood.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Setting up a go routine to simulate ServerMux's
// handleServiceSignals for stop and restart commands.
@@ -365,14 +356,8 @@ func TestServiceSetCreds(t *testing.T) {
// Initialize admin peers to make admin RPC calls. Note: In a
// single node setup, this degenerates to a simple function
// call under the hood.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
credentials := serverConfig.GetCredential()
var body []byte
@@ -453,14 +438,8 @@ func TestListLocksHandler(t *testing.T) {
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
testCases := []struct {
bucket string
@@ -528,11 +507,7 @@ func TestClearLocksHandler(t *testing.T) {
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
initGlobalAdminPeers(eps)
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
testCases := []struct {
bucket string
@@ -1048,6 +1023,160 @@ func TestHealObjectHandler(t *testing.T) {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code)
}
}
}
// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, opHdr, method string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
req, err := newTestRequest(method, "/?"+queryVal.Encode(), contentLength, bodySeeker)
if err != nil {
return nil, traceError(err)
}
req.Header.Set(minioAdminOpHeader, opHdr)
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, traceError(err)
}
return req, nil
}
// mkHealUploadQuery - helper to build HealUploadHandler query string.
func mkHealUploadQuery(bucket, object, uploadID, dryRun string) url.Values {
queryVal := url.Values{}
queryVal.Set(string(mgmtBucket), bucket)
queryVal.Set(string(mgmtObject), object)
queryVal.Set(string(mgmtUploadID), uploadID)
queryVal.Set("heal", "")
queryVal.Set(string(mgmtDryRun), dryRun)
return queryVal
}
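// For reference, a query built by mkHealUploadQuery encodes to something like
// the following (assuming the mgmt* constants map to their lowercase
// parameter names; url.Values.Encode sorts keys alphabetically):
//
//   /?bucket=mybucket&dry-run=yes&heal=&object=myobject&upload-id=<id>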
// TestHealUploadHandler - test for HealUploadHandler.
func TestHealUploadHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Create an object myobject under bucket mybucket.
bucketName := "mybucket"
objName := "myobject"
err = adminTestBed.objLayer.MakeBucket(bucketName)
if err != nil {
t.Fatalf("Failed to make bucket %s - %v", bucketName, err)
}
// Create a new multipart upload.
uploadID, err := adminTestBed.objLayer.NewMultipartUpload(bucketName, objName, nil)
if err != nil {
t.Fatalf("Failed to create a new multipart upload %s/%s - %v",
bucketName, objName, err)
}
// Upload a part.
partID := 1
_, err = adminTestBed.objLayer.PutObjectPart(bucketName, objName, uploadID,
partID, int64(len("hello")), bytes.NewReader([]byte("hello")), "", "")
if err != nil {
t.Fatalf("Failed to upload part %d of %s/%s - %v", partID,
bucketName, objName, err)
}
testCases := []struct {
bucket string
object string
dryrun string
statusCode int
}{
// 1. Valid test case.
{
bucket: bucketName,
object: objName,
statusCode: http.StatusOK,
},
// 2. Invalid bucket name.
{
bucket: `invalid\\Bucket`,
object: "myobject",
statusCode: http.StatusBadRequest,
},
// 3. Bucket not found.
{
bucket: "bucketnotfound",
object: "myobject",
statusCode: http.StatusNotFound,
},
// 4. Invalid object name.
{
bucket: bucketName,
object: `invalid\\Object`,
statusCode: http.StatusBadRequest,
},
// 5. Object not found.
{
bucket: bucketName,
object: "objectnotfound",
statusCode: http.StatusNotFound,
},
// 6. Valid test case with dry-run.
{
bucket: bucketName,
object: objName,
dryrun: "yes",
statusCode: http.StatusOK,
},
}
for i, test := range testCases {
// Prepare query params.
queryVal := mkHealUploadQuery(test.bucket, test.object, uploadID, test.dryrun)
req, err1 := buildAdminRequest(queryVal, "upload", http.MethodPost, 0, nil)
if err1 != nil {
t.Fatalf("Test %d - Failed to construct heal object request - %v", i+1, err1)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if test.statusCode != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code)
}
}
sample := testCases[0]
// Modify authorization header after signing to test signature
// mismatch handling.
queryVal := mkHealUploadQuery(sample.bucket, sample.object, uploadID, sample.dryrun)
req, err := buildAdminRequest(queryVal, "upload", "POST", 0, nil)
if err != nil {
t.Fatalf("Failed to construct heal object request - %v", err)
}
// Set x-amz-date to a date different from the time of signing.
req.Header.Set("x-amz-date", time.Time{}.Format(iso8601Format))
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusForbidden {
t.Errorf("Expected %d but received %d", http.StatusBadRequest, rec.Code)
}
// Set objectAPI to nil to test Server not initialized case.
resetGlobalObjectAPI()
queryVal = mkHealUploadQuery(sample.bucket, sample.object, uploadID, sample.dryrun)
req, err = buildAdminRequest(queryVal, "upload", "POST", 0, nil)
if err != nil {
t.Fatalf("Failed to construct heal object request - %v", err)
}
rec = httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusServiceUnavailable {
t.Errorf("Expected %d but received %d", http.StatusServiceUnavailable, rec.Code)
}
}
// TestHealFormatHandler - test for HealFormatHandler.
@@ -1061,21 +1190,11 @@ func TestHealFormatHandler(t *testing.T) {
// Prepare query params for heal-format mgmt REST API.
queryVal := url.Values{}
queryVal.Set("heal", "")
req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil)
req, err := buildAdminRequest(queryVal, "format", "POST", 0, nil)
if err != nil {
t.Fatalf("Failed to construct heal object request - %v", err)
}
// Set x-minio-operation header to format.
req.Header.Set(minioAdminOpHeader, "format")
// Sign the request using signature v4.
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign heal object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
@@ -1092,34 +1211,18 @@ func TestGetConfigHandler(t *testing.T) {
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Prepare query params for get-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("config", "")
req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil)
req, err := buildAdminRequest(queryVal, "get", http.MethodGet, 0, nil)
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
// Set x-minio-operation header to get.
req.Header.Set(minioAdminOpHeader, "get")
// Sign the request using signature v4.
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign heal object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
@@ -1137,14 +1240,8 @@ func TestSetConfigHandler(t *testing.T) {
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// SetConfigHandler restarts minio setup - need to start a
// signal receiver to receive on globalServiceSignalCh.
@@ -1154,21 +1251,12 @@ func TestSetConfigHandler(t *testing.T) {
queryVal := url.Values{}
queryVal.Set("config", "")
req, err := newTestRequest("PUT", "/?"+queryVal.Encode(), int64(len(configJSON)), bytes.NewReader(configJSON))
req, err := buildAdminRequest(queryVal, "set", http.MethodPut, int64(len(configJSON)),
bytes.NewReader(configJSON))
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
// Set x-minio-operation header to set.
req.Header.Set(minioAdminOpHeader, "set")
// Sign the request using signature v4.
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign heal object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
@@ -1186,6 +1274,58 @@ func TestSetConfigHandler(t *testing.T) {
}
}
func TestAdminServerInfo(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Prepare query params for server info mgmt REST API.
queryVal := url.Values{}
queryVal.Set("info", "")
req, err := buildAdminRequest(queryVal, "", http.MethodGet, 0, nil)
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
results := []ServerInfo{}
err = json.NewDecoder(rec.Body).Decode(&results)
if err != nil {
t.Fatalf("Failed to decode set config result json %v", err)
}
if len(results) == 0 {
t.Error("Expected at least one server info result")
}
for _, serverInfo := range results {
if len(serverInfo.Addr) == 0 {
t.Error("Expected server address to be non empty")
}
if serverInfo.Error != nil {
t.Errorf("Unexpected error = %v\n", serverInfo.Error)
}
if serverInfo.Data.StorageInfo.Free == 0 {
t.Error("Expected StorageInfo.Free to be non empty")
}
if serverInfo.Data.Properties.Region != globalMinioDefaultRegion {
t.Errorf("Expected %s, got %s", globalMinioDefaultRegion, serverInfo.Data.Properties.Region)
}
}
}
// TestToAdminAPIErr - test for toAdminAPIErr helper function.
func TestToAdminAPIErr(t *testing.T) {
testCases := []struct {
@@ -1219,6 +1359,11 @@ func TestToAdminAPIErr(t *testing.T) {
}
func TestWriteSetConfigResponse(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer removeAll(rootPath)
testCases := []struct {
status bool
errs []error
@@ -1250,7 +1395,7 @@ func TestWriteSetConfigResponse(t *testing.T) {
},
}
testURL, err := url.Parse("dummy.com")
testURL, err := url.Parse("http://dummy.com")
if err != nil {
t.Fatalf("Failed to parse a place-holder url")
}
@@ -1288,9 +1433,9 @@ func TestWriteSetConfigResponse(t *testing.T) {
}
}
// mkUploadsHealQuery - helper function to construct query values for
// mkListUploadsHealQuery - helper function to construct query values for
// listUploadsHeal.
func mkUploadsHealQuery(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploadsStr string) url.Values {
func mkListUploadsHealQuery(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploadsStr string) url.Values {
queryVal := make(url.Values)
queryVal.Set("heal", "")
@@ -1319,12 +1464,13 @@ func TestListHealUploadsHandler(t *testing.T) {
defer adminTestBed.objLayer.DeleteBucket("mybucket")
testCases := []struct {
bucket string
prefix string
keyMarker string
delimiter string
maxKeys string
statusCode int
bucket string
prefix string
keyMarker string
delimiter string
maxKeys string
statusCode int
expectedResp ListMultipartUploadsResponse
}{
// 1. Valid params.
{
@@ -1334,6 +1480,14 @@ func TestListHealUploadsHandler(t *testing.T) {
delimiter: "/",
maxKeys: "10",
statusCode: http.StatusOK,
expectedResp: ListMultipartUploadsResponse{
XMLName: xml.Name{Space: "http://s3.amazonaws.com/doc/2006-03-01/", Local: "ListMultipartUploadsResult"},
Bucket: "mybucket",
KeyMarker: "prefix11",
Delimiter: "/",
Prefix: "prefix",
MaxUploads: 10,
},
},
// 2. Valid params with empty prefix.
{
@@ -1343,6 +1497,14 @@ func TestListHealUploadsHandler(t *testing.T) {
delimiter: "/",
maxKeys: "10",
statusCode: http.StatusOK,
expectedResp: ListMultipartUploadsResponse{
XMLName: xml.Name{Space: "http://s3.amazonaws.com/doc/2006-03-01/", Local: "ListMultipartUploadsResult"},
Bucket: "mybucket",
KeyMarker: "",
Delimiter: "/",
Prefix: "",
MaxUploads: 10,
},
},
// 3. Invalid params with invalid bucket.
{
@@ -1401,23 +1563,64 @@ func TestListHealUploadsHandler(t *testing.T) {
}
for i, test := range testCases {
queryVal := mkUploadsHealQuery(test.bucket, test.prefix, test.keyMarker, "", test.delimiter, test.maxKeys)
queryVal := mkListUploadsHealQuery(test.bucket, test.prefix, test.keyMarker, "", test.delimiter, test.maxKeys)
req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil)
req, err := buildAdminRequest(queryVal, "list-uploads", http.MethodGet, 0, nil)
if err != nil {
t.Fatalf("Test %d - Failed to construct list uploads needing heal request - %v", i+1, err)
}
req.Header.Set(minioAdminOpHeader, "list-uploads")
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Test %d - Failed to sign list uploads needing heal request - %v", i+1, err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if test.statusCode != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code)
}
// Compare result with the expected one only when we receive 200 OK
if rec.Code == http.StatusOK {
resp := rec.Result()
xmlBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("Test %d: Failed to read response %v", i+1, err)
}
var actualResult ListMultipartUploadsResponse
err = xml.Unmarshal(xmlBytes, &actualResult)
if err != nil {
t.Errorf("Test %d: Failed to unmarshal xml %v", i+1, err)
}
if !reflect.DeepEqual(test.expectedResp, actualResult) {
t.Fatalf("Test %d: Unexpected response `%+v`, expected: `%+v`", i+1, test.expectedResp, actualResult)
}
}
}
}
// Test for newHealResult helper function.
func TestNewHealResult(t *testing.T) {
testCases := []struct {
healedDisks int
offlineDisks int
state healState
}{
// 1. No disks healed, no disks offline.
{0, 0, healNone},
// 2. No disks healed, non-zero disks offline.
{0, 1, healNone},
// 3. Non-zero disks healed, no disks offline.
{1, 0, healOK},
// 4. Non-zero disks healed, non-zero disks offline.
{1, 1, healPartial},
}
for i, test := range testCases {
actual := newHealResult(test.healedDisks, test.offlineDisks)
if actual.State != test.state {
t.Errorf("Test %d: Expected %v but received %v", i+1,
test.state, actual.State)
}
}
}
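// A minimal sketch of the mapping exercised above, assuming the healState
// constants behave as the test table implies (the real helper lives in the
// admin handler code; the healResult struct name is an assumption, only its
// State field is exercised here):
//
//   func newHealResult(healedDisks, offlineDisks int) healResult {
//   	switch {
//   	case healedDisks == 0:
//   		return healResult{State: healNone} // nothing was healed
//   	case offlineDisks == 0:
//   		return healResult{State: healOK} // healed with all disks online
//   	default:
//   		return healResult{State: healPartial} // healed, but disks offline
//   	}
//   }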

View File

@@ -64,6 +64,8 @@ func registerAdminRouter(mux *router.Router) {
adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "object").HandlerFunc(adminAPI.HealObjectHandler)
// Heal Format.
adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "format").HandlerFunc(adminAPI.HealFormatHandler)
// Heal Uploads.
adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "upload").HandlerFunc(adminAPI.HealUploadHandler)
/// Config operations

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
"net/url"
"net"
"os"
"path"
"path/filepath"
@@ -28,6 +28,8 @@ import (
"sort"
"sync"
"time"
"github.com/minio/minio-go/pkg/set"
)
const (
@@ -35,7 +37,7 @@ const (
serviceRestartRPC = "Admin.Restart"
listLocksRPC = "Admin.ListLocks"
reInitDisksRPC = "Admin.ReInitDisks"
uptimeRPC = "Admin.Uptime"
serverInfoDataRPC = "Admin.ServerInfoData"
getConfigRPC = "Admin.GetConfig"
writeTmpConfigRPC = "Admin.WriteTmpConfig"
commitConfigRPC = "Admin.CommitConfig"
@@ -57,7 +59,7 @@ type adminCmdRunner interface {
Restart() error
ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
ReInitDisks() error
Uptime() (time.Duration, error)
ServerInfoData() (ServerInfoData, error)
GetConfig() ([]byte, error)
WriteTmpConfig(tmpFileName string, configBytes []byte) error
CommitConfig(tmpFileName string) error
@@ -110,26 +112,48 @@ func (rc remoteAdminClient) ReInitDisks() error {
return rc.Call(reInitDisksRPC, &args, &reply)
}
// Uptime - Returns the uptime of this server. Timestamp is taken
// after object layer is initialized.
func (lc localAdminClient) Uptime() (time.Duration, error) {
// ServerInfoData - Returns the server info of this server.
func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) {
if globalBootTime.IsZero() {
return time.Duration(0), errServerNotInitialized
return ServerInfoData{}, errServerNotInitialized
}
return time.Now().UTC().Sub(globalBootTime), nil
// Build storage info
objLayer := newObjectLayerFn()
if objLayer == nil {
return ServerInfoData{}, errServerNotInitialized
}
storage := objLayer.StorageInfo()
var arns []string
for queueArn := range globalEventNotifier.GetAllExternalTargets() {
arns = append(arns, queueArn)
}
return ServerInfoData{
StorageInfo: storage,
ConnStats: globalConnStats.toServerConnStats(),
HTTPStats: globalHTTPStats.toServerHTTPStats(),
Properties: ServerProperties{
Uptime: UTCNow().Sub(globalBootTime),
Version: Version,
CommitID: CommitID,
SQSARN: arns,
Region: serverConfig.GetRegion(),
},
}, nil
}
// Uptime - returns the uptime of the server to which the RPC call is made.
func (rc remoteAdminClient) Uptime() (time.Duration, error) {
// ServerInfoData - returns the server info of the server to which the RPC call is made.
func (rc remoteAdminClient) ServerInfoData() (ServerInfoData, error) {
args := AuthRPCArgs{}
reply := UptimeReply{}
err := rc.Call(uptimeRPC, &args, &reply)
reply := ServerInfoDataReply{}
err := rc.Call(serverInfoDataRPC, &args, &reply)
if err != nil {
return time.Duration(0), err
return ServerInfoData{}, err
}
return reply.Uptime, nil
return reply.ServerInfoData, nil
}
// GetConfig - returns config.json of the local server.
@@ -211,52 +235,43 @@ type adminPeer struct {
type adminPeers []adminPeer
// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(eps []*url.URL) adminPeers {
var servicePeers []adminPeer
// map to store peers that are already added to ret
seenAddr := make(map[string]bool)
// add local (self) as peer in the array
servicePeers = append(servicePeers, adminPeer{
globalMinioAddr,
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
thisPeer := globalMinioAddr
if globalMinioHost == "" {
thisPeer = net.JoinHostPort("localhost", globalMinioPort)
}
adminPeerList = append(adminPeerList, adminPeer{
thisPeer,
localAdminClient{},
})
seenAddr[globalMinioAddr] = true
serverCred := serverConfig.GetCredential()
// iterate over endpoints to find new remote peers and add
// them to ret.
for _, ep := range eps {
if ep.Host == "" {
hostSet := set.CreateStringSet(globalMinioAddr)
cred := serverConfig.GetCredential()
serviceEndpoint := path.Join(minioReservedBucketPath, adminPath)
for _, host := range GetRemotePeers(endpoints) {
if hostSet.Contains(host) {
continue
}
// Check if the remote host has been added already
if !seenAddr[ep.Host] {
cfg := authConfig{
accessKey: serverCred.AccessKey,
secretKey: serverCred.SecretKey,
serverAddr: ep.Host,
hostSet.Add(host)
adminPeerList = append(adminPeerList, adminPeer{
addr: host,
cmdRunner: &remoteAdminClient{newAuthRPCClient(authConfig{
accessKey: cred.AccessKey,
secretKey: cred.SecretKey,
serverAddr: host,
serviceEndpoint: serviceEndpoint,
secureConn: globalIsSSL,
serviceEndpoint: path.Join(minioReservedBucketPath, adminPath),
serviceName: "Admin",
}
servicePeers = append(servicePeers, adminPeer{
addr: ep.Host,
cmdRunner: &remoteAdminClient{newAuthRPCClient(cfg)},
})
seenAddr[ep.Host] = true
}
})},
})
}
return servicePeers
return adminPeerList
}
// Initialize global adminPeer collection.
func initGlobalAdminPeers(eps []*url.URL) {
globalAdminPeers = makeAdminPeers(eps)
func initGlobalAdminPeers(endpoints EndpointList) {
globalAdminPeers = makeAdminPeers(endpoints)
}
// invokeServiceCmd - Invoke Restart command.
@@ -380,7 +395,7 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
// the setup is the uptime of the single minio server
// instance.
if !globalIsDistXL {
return time.Now().UTC().Sub(globalBootTime), nil
return UTCNow().Sub(globalBootTime), nil
}
uptimes := make(uptimeSlice, len(peers))
@@ -391,7 +406,8 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
uptimes[idx].uptime, uptimes[idx].err = peer.cmdRunner.Uptime()
serverInfoData, rpcErr := peer.cmdRunner.ServerInfoData()
uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
}(i, peer)
}
wg.Wait()

View File

@@ -245,7 +245,7 @@ func TestGetValidServerConfig(t *testing.T) {
// Invalid config - no quorum.
serverConfigs = []serverConfigV13{c1, c2, c2, c1}
validConfig, err = getValidServerConfig(serverConfigs, noErrs)
_, err = getValidServerConfig(serverConfigs, noErrs)
if err != errXLWriteQuorum {
t.Errorf("Expected to fail due to lack of quorum but received %v", err)
}
@@ -253,7 +253,7 @@ func TestGetValidServerConfig(t *testing.T) {
// All errors
allErrs := []error{errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound}
serverConfigs = []serverConfigV13{{}, {}, {}, {}}
validConfig, err = getValidServerConfig(serverConfigs, allErrs)
_, err = getValidServerConfig(serverConfigs, allErrs)
if err != errXLWriteQuorum {
t.Errorf("Expected to fail due to lack of quorum but received %v", err)
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -53,10 +53,10 @@ type ListLocksReply struct {
volLocks []VolumeLockInfo
}
// UptimeReply - wraps the uptime response over RPC.
type UptimeReply struct {
// ServerInfoDataReply - wraps the server info response over RPC.
type ServerInfoDataReply struct {
AuthRPCReply
Uptime time.Duration
ServerInfoData ServerInfoData
}
// ConfigReply - wraps the server config response over RPC.
@@ -122,8 +122,8 @@ func (s *adminCmd) ReInitDisks(args *AuthRPCArgs, reply *AuthRPCReply) error {
return nil
}
// Uptime - returns the time when object layer was initialized on this server.
func (s *adminCmd) Uptime(args *AuthRPCArgs, reply *UptimeReply) error {
// ServerInfoData - returns the server info once the object layer is initialized on this server.
func (s *adminCmd) ServerInfoData(args *AuthRPCArgs, reply *ServerInfoDataReply) error {
if err := args.IsAuthenticated(); err != nil {
return err
}
@@ -132,12 +132,29 @@ func (s *adminCmd) Uptime(args *AuthRPCArgs, reply *UptimeReply) error {
return errServerNotInitialized
}
// N B The uptime is computed assuming that the system time is
// monotonic. This is not the case in time pkg in Go, see
// https://github.com/golang/go/issues/12914. This is expected
// to be fixed by go1.9.
*reply = UptimeReply{
Uptime: time.Now().UTC().Sub(globalBootTime),
// Build storage info
objLayer := newObjectLayerFn()
if objLayer == nil {
return errServerNotInitialized
}
storageInfo := objLayer.StorageInfo()
var arns []string
for queueArn := range globalEventNotifier.GetAllExternalTargets() {
arns = append(arns, queueArn)
}
reply.ServerInfoData = ServerInfoData{
Properties: ServerProperties{
Uptime: UTCNow().Sub(globalBootTime),
Version: Version,
CommitID: CommitID,
Region: serverConfig.GetRegion(),
SQSARN: arns,
},
StorageInfo: storageInfo,
ConnStats: globalConnStats.toServerConnStats(),
HTTPStats: globalHTTPStats.toServerHTTPStats(),
}
return nil

View File

@@ -18,9 +18,7 @@ package cmd
import (
"encoding/json"
"net/url"
"testing"
"time"
)
func testAdminCmd(cmd cmdType, t *testing.T) {
@@ -40,7 +38,7 @@ func testAdminCmd(cmd cmdType, t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
reply := LoginRPCReply{}
err = adminServer.Login(&args, &reply)
@@ -87,9 +85,7 @@ func TestReInitDisks(t *testing.T) {
defer removeRoots(xlDirs)
// Set globalEndpoints for a single node XL setup.
for _, xlDir := range xlDirs {
globalEndpoints = append(globalEndpoints, &url.URL{Path: xlDir})
}
globalEndpoints = mustGetNewEndpointList(xlDirs...)
// Setup admin rpc server for an XL backend.
globalIsXL = true
@@ -99,7 +95,7 @@ func TestReInitDisks(t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
reply := LoginRPCReply{}
err = adminServer.Login(&args, &reply)
@@ -124,7 +120,7 @@ func TestReInitDisks(t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
fsReply := LoginRPCReply{}
err = fsAdminServer.Login(&fsArgs, &fsReply)
@@ -161,7 +157,7 @@ func TestGetConfig(t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
reply := LoginRPCReply{}
err = adminServer.Login(&args, &reply)
@@ -205,7 +201,7 @@ func TestWriteAndCommitConfig(t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
reply := LoginRPCReply{}
err = adminServer.Login(&args, &reply)

View File

@@ -114,6 +114,7 @@ const (
ErrInvalidQueryParams
ErrBucketAlreadyOwnedByYou
ErrInvalidDuration
ErrNotSupported
// Add new error codes here.
// Bucket notification related errors.
@@ -666,6 +667,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrInvalidBucketName
case BucketNotFound:
apiErr = ErrNoSuchBucket
case BucketAlreadyOwnedByYou:
apiErr = ErrBucketAlreadyOwnedByYou
case BucketNotEmpty:
apiErr = ErrBucketNotEmpty
case BucketExists:
@@ -698,8 +701,12 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooLarge
case ObjectTooSmall:
apiErr = ErrEntityTooSmall
case NotSupported:
apiErr = ErrNotSupported
case NotImplemented:
apiErr = ErrNotImplemented
case PolicyNotFound:
apiErr = ErrNoSuchBucketPolicy
default:
apiErr = ErrInternalError
}

View File

@@ -103,6 +103,10 @@ func TestAPIErrCode(t *testing.T) {
StorageFull{},
ErrStorageFull,
},
{
NotSupported{},
ErrNotSupported,
},
{
NotImplemented{},
ErrNotImplemented,

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015,2016 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -34,8 +34,9 @@ func mustGetRequestID(t time.Time) string {
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
// Set unique request ID for each reply.
w.Header().Set(responseRequestIDKey, mustGetRequestID(time.Now().UTC()))
w.Header().Set(responseRequestIDKey, mustGetRequestID(UTCNow()))
w.Header().Set("Server", globalServerUserAgent)
w.Header().Set("X-Amz-Bucket-Region", serverConfig.GetRegion())
w.Header().Set("Accept-Ranges", "bytes")
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,12 +18,11 @@ package cmd
import (
"testing"
"time"
)
func TestNewRequestID(t *testing.T) {
// Ensure that it returns an alphanumeric result of length 16.
var id = mustGetRequestID(time.Now().UTC())
var id = mustGetRequestID(UTCNow())
if len(id) != 16 {
t.Fail()

View File

@@ -64,7 +64,6 @@ func isRequestPostPolicySignatureV4(r *http.Request) bool {
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
func isRequestSignStreamingV4(r *http.Request) bool {
return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 &&
r.Header.Get("content-encoding") == streamingContentEncoding &&
r.Method == httpPUT
}
@@ -143,19 +142,16 @@ func isReqAuthenticatedV2(r *http.Request) (s3Error APIErrorCode) {
return doesPresignV2SignatureMatch(r)
}
func reqSignatureV4Verify(r *http.Request) (s3Error APIErrorCode) {
sha256sum := r.Header.Get("X-Amz-Content-Sha256")
// Skips calculating sha256 on the payload on server,
// if client requested for it.
if skipContentSha256Cksum(r) {
sha256sum = unsignedPayload
func reqSignatureV4Verify(r *http.Request, region string) (s3Error APIErrorCode) {
sha256sum := getContentSha256Cksum(r)
switch {
case isRequestSignatureV4(r):
return doesSignatureMatch(sha256sum, r, region)
case isRequestPresignedSignatureV4(r):
return doesPresignedSignatureMatch(sha256sum, r, region)
default:
return ErrAccessDenied
}
if isRequestSignatureV4(r) {
return doesSignatureMatch(sha256sum, r, serverConfig.GetRegion())
} else if isRequestPresignedSignatureV4(r) {
return doesPresignedSignatureMatch(sha256sum, r, serverConfig.GetRegion())
}
return ErrAccessDenied
}
// Verify if request has valid AWS Signature Version '4'.
@@ -163,32 +159,39 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
if r == nil {
return ErrInternalError
}
if errCode := reqSignatureV4Verify(r, region); errCode != ErrNone {
return errCode
}
payload, err := ioutil.ReadAll(r.Body)
if err != nil {
errorIf(err, "Unable to read request body for signature verification")
return ErrInternalError
}
// Populate back the payload.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
// Verify Content-Md5, if payload is set.
if r.Header.Get("Content-Md5") != "" {
if r.Header.Get("Content-Md5") != getMD5HashBase64(payload) {
return ErrBadDigest
}
}
// Populate back the payload.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
// Skips calculating sha256 on the payload on server, if client requested for it.
var sha256sum string
if skipContentSha256Cksum(r) {
sha256sum = unsignedPayload
} else {
sha256sum = getSHA256Hash(payload)
return ErrNone
}
if isRequestSignatureV4(r) {
return doesSignatureMatch(sha256sum, r, region)
} else if isRequestPresignedSignatureV4(r) {
return doesPresignedSignatureMatch(sha256sum, r, region)
// Verify that the X-Amz-Content-Sha256 header == sha256(payload).
// If the X-Amz-Content-Sha256 header is not sent, we don't calculate/verify sha256(payload).
sum := r.Header.Get("X-Amz-Content-Sha256")
if isRequestPresignedSignatureV4(r) {
sum = r.URL.Query().Get("X-Amz-Content-Sha256")
}
return ErrAccessDenied
if sum != "" && sum != getSHA256Hash(payload) {
return ErrContentSHA256Mismatch
}
return ErrNone
}
// authHandler - handles all the incoming authorization headers and validates them if possible.

View File

@@ -308,6 +308,16 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod
return req
}
func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
cred := serverConfig.GetCredential()
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
// Tests the isReqAuthenticated function; verifies replies for S3 errors.
func TestIsReqAuthenticated(t *testing.T) {
path, err := newTestConfig(globalMinioDefaultRegion)
@@ -333,16 +343,13 @@ func TestIsReqAuthenticated(t *testing.T) {
// When request is unsigned, access denied is returned.
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// When request is properly signed, but has bad Content-MD5 header.
{mustNewSignedRequest("PUT", "http://127.0.0.1:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
// Validates all testcases.
for _, testCase := range testCases {
if testCase.s3Error == ErrBadDigest {
testCase.req.Header.Set("Content-Md5", "garbage")
}
if s3Error := isReqAuthenticated(testCase.req, serverConfig.GetRegion()); s3Error != testCase.s3Error {
t.Fatalf("Unexpected s3error returned wanted %d, got %d", testCase.s3Error, s3Error)
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -93,7 +93,7 @@ func (authClient *AuthRPCClient) Login() (err error) {
Username: authClient.config.accessKey,
Password: authClient.config.secretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
reply := LoginRPCReply{}

View File

@@ -23,7 +23,6 @@ import (
"math/rand"
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
@@ -245,7 +244,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// seeding the random number generator.
rand.Seed(time.Now().UTC().UnixNano())
rand.Seed(UTCNow().UnixNano())
var b byte
// pick a character randomly.
b = letterBytes[rand.Intn(len(letterBytes))]

68
cmd/browser-flag.go Normal file
View File

@@ -0,0 +1,68 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
)
// BrowserFlag - wrapper bool type.
type BrowserFlag bool
// String - returns string of BrowserFlag.
func (bf BrowserFlag) String() string {
if bf {
return "on"
}
return "off"
}
// MarshalJSON - converts BrowserFlag into JSON data.
func (bf BrowserFlag) MarshalJSON() ([]byte, error) {
return json.Marshal(bf.String())
}
// UnmarshalJSON - parses given data into BrowserFlag.
func (bf *BrowserFlag) UnmarshalJSON(data []byte) (err error) {
var s string
if err = json.Unmarshal(data, &s); err == nil {
b := BrowserFlag(true)
if s == "" {
// Empty string is treated as valid.
*bf = b
} else if b, err = ParseBrowserFlag(s); err == nil {
*bf = b
}
}
return err
}
// ParseBrowserFlag - parses string into BrowserFlag.
func ParseBrowserFlag(s string) (bf BrowserFlag, err error) {
if s == "on" {
bf = true
} else if s == "off" {
bf = false
} else {
err = fmt.Errorf("invalid value %s for BrowserFlag", s)
}
return bf, err
}
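// Usage sketch (illustrative): BrowserFlag round-trips through JSON as the
// strings "on"/"off" rather than booleans:
//
//   var bf BrowserFlag
//   _ = json.Unmarshal([]byte(`"on"`), &bf) // bf == BrowserFlag(true)
//   data, _ := json.Marshal(bf)             // data == []byte(`"on"`)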

137
cmd/browser-flag_test.go Normal file
View File

@@ -0,0 +1,137 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"testing"
)
// Test BrowserFlag.String()
func TestBrowserFlagString(t *testing.T) {
var bf BrowserFlag
testCases := []struct {
flag BrowserFlag
expectedResult string
}{
{bf, "off"},
{BrowserFlag(true), "on"},
{BrowserFlag(false), "off"},
}
for _, testCase := range testCases {
str := testCase.flag.String()
if testCase.expectedResult != str {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, str)
}
}
}
// Test BrowserFlag.MarshalJSON()
func TestBrowserFlagMarshalJSON(t *testing.T) {
var bf BrowserFlag
testCases := []struct {
flag BrowserFlag
expectedResult string
}{
{bf, `"off"`},
{BrowserFlag(true), `"on"`},
{BrowserFlag(false), `"off"`},
}
for _, testCase := range testCases {
data, _ := testCase.flag.MarshalJSON()
if testCase.expectedResult != string(data) {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, string(data))
}
}
}
// Test BrowserFlag.UnmarshalJSON()
func TestBrowserFlagUnmarshalJSON(t *testing.T) {
testCases := []struct {
data []byte
expectedResult BrowserFlag
expectedErr error
}{
{[]byte(`{}`), BrowserFlag(false), errors.New("json: cannot unmarshal object into Go value of type string")},
{[]byte(`["on"]`), BrowserFlag(false), errors.New("json: cannot unmarshal array into Go value of type string")},
{[]byte(`"junk"`), BrowserFlag(false), errors.New("invalid value junk for BrowserFlag")},
{[]byte(`"true"`), BrowserFlag(false), errors.New("invalid value true for BrowserFlag")},
{[]byte(`"false"`), BrowserFlag(false), errors.New("invalid value false for BrowserFlag")},
{[]byte(`"ON"`), BrowserFlag(false), errors.New("invalid value ON for BrowserFlag")},
{[]byte(`"OFF"`), BrowserFlag(false), errors.New("invalid value OFF for BrowserFlag")},
{[]byte(`""`), BrowserFlag(true), nil},
{[]byte(`"on"`), BrowserFlag(true), nil},
{[]byte(`"off"`), BrowserFlag(false), nil},
}
for _, testCase := range testCases {
var flag BrowserFlag
err := (&flag).UnmarshalJSON(testCase.data)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
if err == nil && testCase.expectedResult != flag {
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, flag)
}
}
}
// Test ParseBrowserFlag()
func TestParseBrowserFlag(t *testing.T) {
testCases := []struct {
flagStr string
expectedResult BrowserFlag
expectedErr error
}{
{"", BrowserFlag(false), errors.New("invalid value for BrowserFlag")},
{"junk", BrowserFlag(false), errors.New("invalid value junk for BrowserFlag")},
{"true", BrowserFlag(false), errors.New("invalid value true for BrowserFlag")},
{"false", BrowserFlag(false), errors.New("invalid value false for BrowserFlag")},
{"ON", BrowserFlag(false), errors.New("invalid value ON for BrowserFlag")},
{"OFF", BrowserFlag(false), errors.New("invalid value OFF for BrowserFlag")},
{"on", BrowserFlag(true), nil},
{"off", BrowserFlag(false), nil},
}
for _, testCase := range testCases {
bf, err := ParseBrowserFlag(testCase.flagStr)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
if err == nil && testCase.expectedResult != bf {
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, bf)
}
}
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,7 +19,6 @@ package cmd
import (
"path"
"testing"
"time"
)
// API suite container common to both FS and XL.
@@ -96,7 +95,7 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
rreply := &LoginRPCReply{}
err = rclient.Call("BrowserPeer"+loginMethodName, rargs, rreply)
@@ -111,7 +110,7 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) {
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
rreply = &LoginRPCReply{}
err = rclient.Call("BrowserPeer"+loginMethodName, rargs, rreply)

View File

@@ -20,6 +20,7 @@ import (
"encoding/base64"
"encoding/xml"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -322,6 +323,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
// Get host and port from Request.RemoteAddr; on failure,
// fall back to empty strings.
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
host, port = "", ""
}
// Notify deleted event for objects.
for _, dobj := range deletedObjects {
eventNotify(eventData{
@@ -331,6 +339,9 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
Name: dobj.ObjectName,
},
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
}
@@ -359,13 +370,20 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
vars := mux.Vars(r)
bucket := vars["bucket"]
// Validate if incoming location constraint is valid, reject
// requests which do not follow valid region requirements.
if s3Error := isValidLocationConstraint(r); s3Error != ErrNone {
// Parse incoming location constraint.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// Validate if location sent by the client is valid, reject
// requests which do not follow valid region requirements.
if !isValidLocation(location) {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
@@ -520,12 +538,21 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
w.Header().Set("ETag", `"`+objInfo.MD5Sum+`"`)
w.Header().Set("Location", getObjectLocation(bucket, object))
// Get host and port from Request.RemoteAddr.
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
host, port = "", ""
}
// Notify object created event.
defer eventNotify(eventData{
Type: ObjectCreatedPost,
Bucket: objInfo.Bucket,
ObjInfo: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
if successRedirect != "" {

View File

@@ -94,6 +94,10 @@ const (
ObjectCreatedCompleteMultipartUpload
// ObjectRemovedDelete is s3:ObjectRemoved:Delete
ObjectRemovedDelete
// ObjectAccessedGet is s3:ObjectAccessed:Get
ObjectAccessedGet
// ObjectAccessedHead is s3:ObjectAccessed:Head
ObjectAccessedHead
)
// Stringer interface for event name.
@@ -109,6 +113,10 @@ func (eventName EventName) String() string {
return "s3:ObjectCreated:CompleteMultipartUpload"
case ObjectRemovedDelete:
return "s3:ObjectRemoved:Delete"
case ObjectAccessedGet:
return "s3:ObjectAccessed:Get"
case ObjectAccessedHead:
return "s3:ObjectAccessed:Head"
default:
return "s3:Unknown"
}
@@ -128,11 +136,13 @@ type bucketMeta struct {
// Notification event object metadata.
type objectMeta struct {
Key string `json:"key"`
Size int64 `json:"size,omitempty"`
ETag string `json:"eTag,omitempty"`
VersionID string `json:"versionId,omitempty"`
Sequencer string `json:"sequencer"`
Key string `json:"key"`
Size int64 `json:"size,omitempty"`
ETag string `json:"eTag,omitempty"`
ContentType string `json:"contentType,omitempty"`
UserDefined map[string]string `json:"userDefined,omitempty"`
VersionID string `json:"versionId,omitempty"`
Sequencer string `json:"sequencer"`
}
const (
@@ -168,6 +178,14 @@ const (
eventVersion = "2.0"
)
// sourceInfo represents information on the client that triggered the
// event notification.
type sourceInfo struct {
Host string `json:"host"`
Port string `json:"port"`
UserAgent string `json:"userAgent"`
}
// NotificationEvent represents an Amazon S3 bucket notification event.
type NotificationEvent struct {
EventVersion string `json:"eventVersion"`
@@ -179,6 +197,7 @@ type NotificationEvent struct {
RequestParameters map[string]string `json:"requestParameters"`
ResponseElements map[string]string `json:"responseElements"`
S3 eventMeta `json:"s3"`
Source sourceInfo `json:"source"`
}
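// Illustrative fragment (hedged; the values are made up): with the new Source
// field populated from net.SplitHostPort(r.RemoteAddr) and r.UserAgent(), a
// serialized NotificationEvent carries a block like:
//
//   "source": {
//     "host": "127.0.0.1",
//     "port": "51234",
//     "userAgent": "aws-sdk-go/1.8.0"
//   }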
// Represents the Minio SQS type and account IDs.

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -285,7 +285,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
return
}
accountID := fmt.Sprintf("%d", time.Now().UTC().UnixNano())
accountID := fmt.Sprintf("%d", UTCNow().UnixNano())
accountARN := fmt.Sprintf(
"%s:%s:%s:%s-%s",
minioTopic,

View File

@@ -268,6 +268,7 @@ func testListenBucketNotificationNilHandler(obj ObjectLayer, instanceType, bucke
[]string{"*.jpg"}, []string{
"s3:ObjectCreated:*",
"s3:ObjectRemoved:*",
"s3:ObjectAccessed:*",
}), 0, nil, credentials.AccessKey, credentials.SecretKey)
if tErr != nil {
t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: <ERROR> %v", instanceType, tErr)

View File

@@ -16,7 +16,11 @@
package cmd
import "strings"
import (
"strings"
"github.com/minio/minio-go/pkg/set"
)
// List of valid event types.
var suppportedEventTypes = map[string]struct{}{
@@ -29,6 +33,9 @@ var suppportedEventTypes = map[string]struct{}{
// Object removed event types.
"s3:ObjectRemoved:*": {},
"s3:ObjectRemoved:Delete": {},
"s3:ObjectAccessed:Get": {},
"s3:ObjectAccessed:Head": {},
"s3:ObjectAccessed:*": {},
}
// checkEvent - checks if an event is supported.
@@ -148,6 +155,10 @@ func isValidQueueID(queueARN string) bool {
pgN := serverConfig.Notify.GetPostgreSQLByID(sqsARN.AccountID)
// Postgres can work with only default conn. info.
return pgN.Enable
} else if isMySQLQueue(sqsARN) {
msqlN := serverConfig.Notify.GetMySQLByID(sqsARN.AccountID)
// MySQL can work with only default conn. info.
return msqlN.Enable
} else if isKafkaQueue(sqsARN) {
kafkaN := serverConfig.Notify.GetKafkaByID(sqsARN.AccountID)
return (kafkaN.Enable && len(kafkaN.Brokers) > 0 &&
@@ -200,16 +211,14 @@ func validateQueueConfigs(queueConfigs []queueConfig) APIErrorCode {
// Check all the queue configs for any duplicates.
func checkDuplicateQueueConfigs(configs []queueConfig) APIErrorCode {
var queueConfigARNS []string
queueConfigARNS := set.NewStringSet()
// Navigate through each config and count the entries.
for _, config := range configs {
queueConfigARNS = append(queueConfigARNS, config.QueueARN)
queueConfigARNS.Add(config.QueueARN)
}
// Check if there are any duplicate counts.
if err := checkDuplicateStrings(queueConfigARNS); err != nil {
errorIf(err, "Invalid queue configs found.")
if len(queueConfigARNS) != len(configs) {
return ErrOverlappingConfigs
}
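// Example (for clarity): two configs sharing one QueueARN collapse to a
// single set entry, so len(queueConfigARNS) == 1 while len(configs) == 2,
// and the duplicate is reported as ErrOverlappingConfigs.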
@@ -244,6 +253,7 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
// - elasticsearch
// - redis
// - postgresql
// - mysql
// - kafka
// - webhook
func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
@@ -263,6 +273,8 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
mSqs.Type = queueTypeRedis
case hasSuffix(sqsType, queueTypePostgreSQL):
mSqs.Type = queueTypePostgreSQL
case hasSuffix(sqsType, queueTypeMySQL):
mSqs.Type = queueTypeMySQL
case hasSuffix(sqsType, queueTypeKafka):
mSqs.Type = queueTypeKafka
case hasSuffix(sqsType, queueTypeWebhook):

View File

@@ -24,11 +24,6 @@ import (
"path/filepath"
)
// isSSL - returns true with both cert and key exists.
func isSSL() bool {
return isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())
}
func parsePublicCertFile(certFile string) (certs []*x509.Certificate, err error) {
var bytes []byte
@@ -60,11 +55,6 @@ func parsePublicCertFile(certFile string) (certs []*x509.Certificate, err error)
return certs, err
}
// Reads certificate file and returns a list of parsed certificates.
func readCertificateChain() ([]*x509.Certificate, error) {
return parsePublicCertFile(getPublicCertFile())
}
func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
// Get all CA file names.
var caFiles []string
@@ -100,9 +90,19 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
return rootCAs, nil
}
// loadRootCAs fetches CA files provided in minio config and adds them to globalRootCAs
// Currently under Windows, there is no way to load system + user CAs at the same time
func loadRootCAs() (err error) {
globalRootCAs, err = getRootCAs(getCADir())
return err
func getSSLConfig() (publicCerts []*x509.Certificate, rootCAs *x509.CertPool, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return publicCerts, rootCAs, secureConn, err
}
if publicCerts, err = parsePublicCertFile(getPublicCertFile()); err != nil {
return publicCerts, rootCAs, secureConn, err
}
if rootCAs, err = getRootCAs(getCADir()); err != nil {
return publicCerts, rootCAs, secureConn, err
}
secureConn = true
return publicCerts, rootCAs, secureConn, err
}
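// Possible call site (a sketch; the exact global names are assumptions):
// server startup can now derive all TLS state in one call instead of the
// removed isSSL()/loadRootCAs() pair:
//
//   certs, rootCAs, secureConn, err := getSSLConfig()
//   fatalIf(err, "Invalid SSL certificate file")
//   globalRootCAs, globalIsSSL = rootCAs, secureConn
//   _ = certs // e.g. published to browser peers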

View File

@@ -1,64 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net"
"os"
"syscall"
)
// checkPortAvailability - check if given port is already in use.
// Note: The check method tries to listen on given port and closes it.
// It is possible to have a disconnected client in this tiny window of time.
func checkPortAvailability(port string) error {
network := [3]string{"tcp", "tcp4", "tcp6"}
for _, n := range network {
l, err := net.Listen(n, net.JoinHostPort("", port))
if err != nil {
if isAddrInUse(err) {
// Return error if another process is listening on the
// same port.
return err
}
// Ignore any other error (ex. EAFNOSUPPORT)
continue
}
// look for error so we don't have dangling connection
if err = l.Close(); err != nil {
return err
}
}
return nil
}
// Return true if err is "address already in use" error.
// syscall.EADDRINUSE is available on all OSes.
func isAddrInUse(err error) bool {
if opErr, ok := err.(*net.OpError); ok {
if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
if errno, ok := sysErr.Err.(syscall.Errno); ok {
if errno == syscall.EADDRINUSE {
return true
}
}
}
}
return false
}

View File

@@ -1,54 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net"
"runtime"
"testing"
)
// Tests for port availability logic written for server startup sequence.
func TestCheckPortAvailability(t *testing.T) {
tests := []struct {
port string
}{
{getFreePort()},
{getFreePort()},
}
for _, test := range tests {
// This test should pass if the ports are available
err := checkPortAvailability(test.port)
if err != nil {
t.Fatalf("checkPortAvailability test failed for port: %s. Error: %v", test.port, err)
}
// Now use the ports and check again
ln, err := net.Listen("tcp", net.JoinHostPort("", test.port))
if err != nil {
t.Fail()
}
defer ln.Close()
err = checkPortAvailability(test.port)
// Skip if the os is windows due to https://github.com/golang/go/issues/7598
if err == nil && runtime.GOOS != globalWindowsOSName {
t.Fatalf("checkPortAvailability should fail for port: %s. Error: %v", test.port, err)
}
}
}

View File

@@ -21,7 +21,6 @@ import (
"sync"
homedir "github.com/minio/go-homedir"
"github.com/minio/mc/pkg/console"
)
const (
@@ -97,9 +96,7 @@ func (config *ConfigDir) GetPrivateKeyFile() string {
func mustGetDefaultConfigDir() string {
homeDir, err := homedir.Dir()
if err != nil {
console.Fatalln("Unable to get home directory.", err)
}
fatalIf(err, "Unable to get home directory.")
return filepath.Join(homeDir, defaultMinioConfigDir)
}
@@ -133,7 +130,3 @@ func getPublicCertFile() string {
func getPrivateKeyFile() string {
return configDir.GetPrivateKeyFile()
}
func isConfigFileExists() bool {
return isFile(getConfigFile())
}

File diff suppressed because it is too large

View File

@@ -45,12 +45,12 @@ func TestServerConfigMigrateV1(t *testing.T) {
t.Fatal("Unexpected error: ", err)
}
// Check if config v1 is removed from filesystem
if _, err := os.Stat(configPath); err == nil || !os.IsNotExist(err) {
if _, err := osStat(configPath); err == nil || !os.IsNotExist(err) {
t.Fatal("Config V1 file is not purged")
}
// Initialize server config and check again if everything is fine
if err := loadConfig(envParams{}); err != nil {
if err := loadConfig(); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
@@ -109,10 +109,23 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
if err := migrateV13ToV14(); err != nil {
t.Fatal("migrate v13 to v14 should succeed when no config file is found")
}
if err := migrateV14ToV15(); err != nil {
t.Fatal("migrate v14 to v15 should succeed when no config file is found")
}
if err := migrateV15ToV16(); err != nil {
t.Fatal("migrate v15 to v16 should succeed when no config file is found")
}
if err := migrateV16ToV17(); err != nil {
t.Fatal("migrate v16 to v17 should succeed when no config file is found")
}
if err := migrateV17ToV18(); err != nil {
t.Fatal("migrate v17 to v18 should succeed when no config file is found")
}
}
// Test if a config migration from v2 to v12 is successfully done
func TestServerConfigMigrateV2toV14(t *testing.T) {
// Test if a config migration from v2 to v18 is successfully done
func TestServerConfigMigrateV2toV18(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
@@ -146,12 +159,12 @@ func TestServerConfigMigrateV2toV14(t *testing.T) {
}
// Initialize server config and check again if everything is fine
if err := loadConfig(envParams{}); err != nil {
if err := loadConfig(); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
// Check the version number in the upgraded config file
expectedVersion := v14
expectedVersion := v18
if serverConfig.Version != expectedVersion {
t.Fatalf("Expect version "+expectedVersion+", found: %v", serverConfig.Version)
}
@@ -219,4 +232,16 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
if err := migrateV13ToV14(); err == nil {
t.Fatal("migrateConfigV13ToV14() should fail with a corrupted json")
}
if err := migrateV14ToV15(); err == nil {
t.Fatal("migrateConfigV14ToV15() should fail with a corrupted json")
}
if err := migrateV15ToV16(); err == nil {
t.Fatal("migrateConfigV15ToV16() should fail with a corrupted json")
}
if err := migrateV16ToV17(); err == nil {
t.Fatal("migrateConfigV16ToV17() should fail with a corrupted json")
}
if err := migrateV17ToV18(); err == nil {
t.Fatal("migrateConfigV17ToV18() should fail with a corrupted json")
}
}

View File

@@ -16,30 +16,7 @@
package cmd
import (
"os"
"path/filepath"
"sync"
"github.com/minio/minio/pkg/quick"
)
func loadOldConfig(configFile string, config interface{}) (interface{}, error) {
if _, err := os.Stat(configFile); err != nil {
return nil, err
}
qc, err := quick.New(config)
if err != nil {
return nil, err
}
if err = qc.Load(configFile); err != nil {
return nil, err
}
return config, nil
}
import "sync"
/////////////////// Config V1 ///////////////////
type configV1 struct {
@@ -48,16 +25,6 @@ type configV1 struct {
SecretKey string `json:"secretAccessKey"`
}
// loadConfigV1 load config
func loadConfigV1() (*configV1, error) {
configFile := filepath.Join(getConfigDir(), "fsUsers.json")
config, err := loadOldConfig(configFile, &configV1{Version: "1"})
if config == nil {
return nil, err
}
return config.(*configV1), err
}
/////////////////// Config V2 ///////////////////
type configV2 struct {
Version string `json:"version"`
@@ -80,18 +47,7 @@ type configV2 struct {
} `json:"fileLogger"`
}
// loadConfigV2 load config version '2'.
func loadConfigV2() (*configV2, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV2{Version: "2"})
if config == nil {
return nil, err
}
return config.(*configV2), err
}
/////////////////// Config V3 ///////////////////
// backendV3 type.
type backendV3 struct {
Type string `json:"type"`
@@ -143,16 +99,6 @@ type configV3 struct {
Logger loggerV3 `json:"logger"`
}
// loadConfigV3 load config version '3'.
func loadConfigV3() (*configV3, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV3{Version: "3"})
if config == nil {
return nil, err
}
return config.(*configV3), err
}
// logger type representing version '4' logger config.
type loggerV4 struct {
Console struct {
@@ -183,16 +129,6 @@ type configV4 struct {
Logger loggerV4 `json:"logger"`
}
// loadConfigV4 load config version '4'.
func loadConfigV4() (*configV4, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV4{Version: "4"})
if config == nil {
return nil, err
}
return config.(*configV4), err
}
// logger type representing version '5' logger config.
type loggerV5 struct {
Console struct {
@@ -250,20 +186,22 @@ type configV5 struct {
Logger loggerV5 `json:"logger"`
}
// loadConfigV5 load config version '5'.
func loadConfigV5() (*configV5, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV5{Version: "5"})
if config == nil {
return nil, err
}
return config.(*configV5), err
// consoleLoggerV1 - default logger if no other logging is enabled.
type consoleLoggerV1 struct {
Enable bool `json:"enable"`
Level string `json:"level"`
}
type fileLoggerV1 struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
}
type loggerV6 struct {
Console consoleLogger `json:"console"`
File fileLogger `json:"file"`
Syslog syslogLoggerV3 `json:"syslog"`
Console consoleLoggerV1 `json:"console"`
File fileLoggerV1 `json:"file"`
Syslog syslogLoggerV3 `json:"syslog"`
}
// configV6 server configuration version '6'.
@@ -281,16 +219,6 @@ type configV6 struct {
Notify notifierV1 `json:"notify"`
}
// loadConfigV6 load config version '6'.
func loadConfigV6() (*configV6, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV6{Version: "6"})
if config == nil {
return nil, err
}
return config.(*configV6), err
}
// Notifier represents collection of supported notification queues in version
// 1 without NATS streaming.
type notifierV1 struct {
@@ -331,16 +259,6 @@ type serverConfigV7 struct {
rwMutex *sync.RWMutex
}
// loadConfigV7 load config version '7'.
func loadConfigV7() (*serverConfigV7, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV7{Version: "7"})
if config == nil {
return nil, err
}
return config.(*serverConfigV7), err
}
// serverConfigV8 server configuration version '8'. Adds NATS notifier
// configuration.
type serverConfigV8 struct {
@@ -360,16 +278,6 @@ type serverConfigV8 struct {
rwMutex *sync.RWMutex
}
// loadConfigV8 load config version '8'.
func loadConfigV8() (*serverConfigV8, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV8{Version: "8"})
if config == nil {
return nil, err
}
return config.(*serverConfigV8), err
}
// serverConfigV9 server configuration version '9'. Adds PostgreSQL
// notifier configuration.
type serverConfigV9 struct {
@@ -389,13 +297,10 @@ type serverConfigV9 struct {
rwMutex *sync.RWMutex
}
func loadConfigV9() (*serverConfigV9, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV9{Version: "9"})
if config == nil {
return nil, err
}
return config.(*serverConfigV9), err
type loggerV7 struct {
sync.RWMutex
Console consoleLoggerV1 `json:"console"`
File fileLoggerV1 `json:"file"`
}
// serverConfigV10 server configuration version '10' which is like
@@ -409,21 +314,12 @@ type serverConfigV10 struct {
Region string `json:"region"`
// Additional error logging configuration.
Logger logger `json:"logger"`
Logger loggerV7 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
func loadConfigV10() (*serverConfigV10, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV10{Version: "10"})
if config == nil {
return nil, err
}
return config.(*serverConfigV10), err
}
// natsNotifyV1 - structure was valid until config V 11
type natsNotifyV1 struct {
Enable bool `json:"enable"`
@@ -446,21 +342,12 @@ type serverConfigV11 struct {
Region string `json:"region"`
// Additional error logging configuration.
Logger logger `json:"logger"`
Logger loggerV7 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
func loadConfigV11() (*serverConfigV11, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV11{Version: "11"})
if config == nil {
return nil, err
}
return config.(*serverConfigV11), err
}
// serverConfigV12 server configuration version '12' which is like
// version '11' except it adds support for NATS streaming notifications.
type serverConfigV12 struct {
@@ -471,21 +358,12 @@ type serverConfigV12 struct {
Region string `json:"region"`
// Additional error logging configuration.
Logger logger `json:"logger"`
Logger loggerV7 `json:"logger"`
// Notification queue configuration.
Notify notifierV2 `json:"notify"`
}
func loadConfigV12() (*serverConfigV12, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV12{Version: "12"})
if config == nil {
return nil, err
}
return config.(*serverConfigV12), err
}
// serverConfigV13 server configuration version '13' which is like
// version '12' except it adds support for webhook notification.
type serverConfigV13 struct {
@@ -496,17 +374,78 @@ type serverConfigV13 struct {
Region string `json:"region"`
// Additional error logging configuration.
Logger *logger `json:"logger"`
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
func loadConfigV13() (*serverConfigV13, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV13{Version: "13"})
if config == nil {
return nil, err
}
return config.(*serverConfigV13), err
// serverConfigV14 server configuration version '14' which is like
// version '13' except it adds support for the browser param.
type serverConfigV14 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
// serverConfigV15 server configuration version '15' which is like
// version '14' except it adds MySQL support
type serverConfigV15 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
// serverConfigV16 server configuration version '16' which is like
// version '15' except it makes a change to logging configuration.
type serverConfigV16 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
// serverConfigV17 server configuration version '17' which is like
// version '16' except it adds support for "format" parameter in
// database event notification targets: PostgreSQL, MySQL, Redis and
// Elasticsearch.
type serverConfigV17 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
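The frozen v14-v17 structs above exist only so that older on-disk JSON can still be unmarshaled during migration. A hedged sketch of how one such version would typically be read (quick.Load is used the same way later in this diff):
srvCfgV17 := &serverConfigV17{Version: "17"}
if _, err := quick.Load(getConfigFile(), srvCfgV17); err != nil {
// config file missing or corrupted; the migration step decides how to react
}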

View File

@@ -20,51 +20,119 @@ import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"github.com/minio/minio/pkg/quick"
"github.com/tidwall/gjson"
)
// Read Write mutex for safe access to ServerConfig.
var serverConfigMu sync.RWMutex
// Config version
var v14 = "14"
const v18 = "18"
// serverConfigV14 server configuration version '14' which is like
// version '13' except it adds support of browser param.
type serverConfigV14 struct {
var (
// serverConfig server config.
serverConfig *serverConfigV18
serverConfigMu sync.RWMutex
)
// serverConfigV18 server configuration version '18' which is like
// version '17' except it adds support for "deliveryMode" parameter in
// the AMQP notification target.
type serverConfigV18 struct {
sync.RWMutex
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser string `json:"browser"`
Credential credential `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
// Additional error logging configuration.
Logger *logger `json:"logger"`
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
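For illustration only, the v18 change called out above would surface in the on-disk JSON roughly as follows (hypothetical fragment; the amqpNotify schema is not shown in this hunk, and 2 is assumed to mean AMQP persistent delivery):
"notify": {
"amqp": {
"1": { "enable": true, "deliveryMode": 2, ... }
}
}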
func newServerConfigV14() *serverConfigV14 {
srvCfg := &serverConfigV14{
Version: v14,
Region: globalMinioDefaultRegion,
Logger: &logger{},
Notify: &notifier{},
// GetVersion get current config version.
func (s *serverConfigV18) GetVersion() string {
s.RLock()
defer s.RUnlock()
return s.Version
}
// SetRegion set new region.
func (s *serverConfigV18) SetRegion(region string) {
s.Lock()
defer s.Unlock()
s.Region = region
}
// GetRegion get current region.
func (s *serverConfigV18) GetRegion() string {
s.RLock()
defer s.RUnlock()
return s.Region
}
// SetCredential set new credentials.
func (s *serverConfigV18) SetCredential(creds credential) {
s.Lock()
defer s.Unlock()
// Set updated credential.
s.Credential = creds
}
// GetCredential get current credentials.
func (s *serverConfigV18) GetCredential() credential {
s.RLock()
defer s.RUnlock()
return s.Credential
}
// SetBrowser set if browser is enabled.
func (s *serverConfigV18) SetBrowser(b bool) {
s.Lock()
defer s.Unlock()
// Set the new value.
s.Browser = BrowserFlag(b)
}
// GetBrowser get if browser is enabled.
func (s *serverConfigV18) GetBrowser() bool {
s.RLock()
defer s.RUnlock()
return bool(s.Browser)
}
// Save config.
func (s *serverConfigV18) Save() error {
s.RLock()
defer s.RUnlock()
// Save config file.
return quick.Save(getConfigFile(), s)
}
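A short usage sketch of the accessors above, assuming serverConfig has already been initialized by newConfig or loadConfig:
serverConfig.SetRegion("us-west-1")
fatalIf(serverConfig.Save(), "Unable to save the updated config.")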
func newServerConfigV18() *serverConfigV18 {
srvCfg := &serverConfigV18{
Version: v18,
Credential: mustGetNewCredential(),
Region: globalMinioDefaultRegion,
Browser: true,
Logger: &loggers{},
Notify: &notifier{},
}
srvCfg.SetCredential(mustGetNewCredential())
srvCfg.SetBrowser("on")
// Enable console logger by default on a fresh run.
srvCfg.Logger.Console = consoleLogger{
Enable: true,
Level: "error",
}
srvCfg.Logger.Console = NewConsoleLogger()
// Make sure to initialize notification configs.
srvCfg.Notify.AMQP = make(map[string]amqpNotify)
@@ -77,6 +145,8 @@ func newServerConfigV14() *serverConfigV14 {
srvCfg.Notify.NATS["1"] = natsNotify{}
srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
srvCfg.Notify.MySQL = make(map[string]mySQLNotify)
srvCfg.Notify.MySQL["1"] = mySQLNotify{}
srvCfg.Notify.Kafka = make(map[string]kafkaNotify)
srvCfg.Notify.Kafka["1"] = kafkaNotify{}
srvCfg.Notify.Webhook = make(map[string]webhookNotify)
@@ -87,22 +157,21 @@ func newServerConfigV14() *serverConfigV14 {
// newConfig - initialize a new server config, saving env parameters if
// found; otherwise use default parameters
func newConfig(envParams envParams) error {
func newConfig() error {
// Initialize server config.
srvCfg := newServerConfigV14()
srvCfg := newServerConfigV18()
// If env is set for a fresh start, save them to config file.
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(envParams.creds)
srvCfg.SetCredential(globalActiveCred)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(envParams.browser)
srvCfg.SetBrowser(globalIsBrowserEnabled)
}
// Create config path.
if err := createConfigDir(); err != nil {
return err
if globalIsEnvRegion {
srvCfg.SetRegion(globalServerRegion)
}
// hold the mutex lock before a new config is assigned.
@@ -116,51 +185,6 @@ func newConfig(envParams envParams) error {
return serverConfig.Save()
}
// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig(envParams envParams) error {
configFile := getConfigFile()
if _, err := os.Stat(configFile); err != nil {
return err
}
srvCfg := &serverConfigV14{}
qc, err := quick.New(srvCfg)
if err != nil {
return err
}
if err = qc.Load(configFile); err != nil {
return err
}
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(envParams.creds)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(envParams.browser)
}
if strings.ToLower(srvCfg.GetBrowser()) == "off" {
globalIsBrowserEnabled = false
}
// hold the mutex lock before a new config is assigned.
serverConfigMu.Lock()
// Save the loaded config globally.
serverConfig = srvCfg
serverConfigMu.Unlock()
if serverConfig.Version != v14 {
return errors.New("Unsupported config version `" + serverConfig.Version + "`.")
}
return nil
}
// doCheckDupJSONKeys recursively detects duplicate json keys
func doCheckDupJSONKeys(key, value gjson.Result) error {
// Key occurrences map of the current scope to count
@@ -212,159 +236,91 @@ func checkDupJSONKeys(json string) error {
return doCheckDupJSONKeys(rootKey, config)
}
// validateConfig checks for
func validateConfig() error {
// getValidConfig - returns valid server configuration
func getValidConfig() (*serverConfigV18, error) {
srvCfg := &serverConfigV18{
Region: globalMinioDefaultRegion,
Browser: true,
}
// Get file config path
configFile := getConfigFile()
srvCfg := &serverConfigV14{}
// Load config file
qc, err := quick.New(srvCfg)
if err != nil {
return err
}
if err = qc.Load(configFile); err != nil {
return err
if _, err := quick.Load(configFile, srvCfg); err != nil {
return nil, err
}
// Check if config version is valid
if srvCfg.GetVersion() != v14 {
return errors.New("bad config version, expected: " + v14)
if srvCfg.Version != v18 {
return nil, fmt.Errorf("configuration version mismatch. Expected: %s, Got: %s", v18, srvCfg.Version)
}
// Load config file json and check for duplication json keys
jsonBytes, err := ioutil.ReadFile(configFile)
if err != nil {
return err
return nil, err
}
if err := checkDupJSONKeys(string(jsonBytes)); err != nil {
return err
if err = checkDupJSONKeys(string(jsonBytes)); err != nil {
return nil, err
}
// Validate region field
if srvCfg.GetRegion() == "" {
return errors.New("Region config value cannot be empty")
if srvCfg.Region == "" {
return nil, errors.New("Region config value cannot be empty")
}
// Validate browser field
if b := strings.ToLower(srvCfg.GetBrowser()); b != "on" && b != "off" {
return fmt.Errorf("Browser config value %s is invalid", b)
}
// Validate credential fields only when
// they are not set via the environment
// Validate credential field
if !srvCfg.Credential.IsValid() {
return errors.New("invalid credential")
// Error out if the global env credential is not set and the config has an invalid credential
if !globalIsEnvCreds && !srvCfg.Credential.IsValid() {
return nil, errors.New("invalid credential in config file " + configFile)
}
// Validate logger field
if err := srvCfg.Logger.Validate(); err != nil {
return err
if err = srvCfg.Logger.Validate(); err != nil {
return nil, err
}
// Validate notify field
if err := srvCfg.Notify.Validate(); err != nil {
return err
if err = srvCfg.Notify.Validate(); err != nil {
return nil, err
}
return nil
return srvCfg, nil
}
// serverConfig server config.
var serverConfig *serverConfigV14
// GetVersion get current config version.
func (s serverConfigV14) GetVersion() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Version
}
// SetRegion set new region.
func (s *serverConfigV14) SetRegion(region string) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Empty region means "us-east-1" by default from S3 spec.
if region == "" {
region = "us-east-1"
}
s.Region = region
}
// GetRegion get current region.
func (s serverConfigV14) GetRegion() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
if s.Region != "" {
return s.Region
} // region empty
// Empty region means "us-east-1" by default from S3 spec.
return "us-east-1"
}
// SetCredentials set new credentials.
func (s *serverConfigV14) SetCredential(creds credential) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Set updated credential.
s.Credential = creds
}
// GetCredentials get current credentials.
func (s serverConfigV14) GetCredential() credential {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Credential
}
// SetBrowser set if browser is enabled.
func (s *serverConfigV14) SetBrowser(v string) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Set browser param
if v == "" {
v = "on" // Browser is on by default.
}
// Set the new value.
s.Browser = v
}
// GetCredentials get current credentials.
func (s serverConfigV14) GetBrowser() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
if s.Browser != "" {
return s.Browser
} // empty browser.
// Empty browser means "on" by default.
return "on"
}
// Save config.
func (s serverConfigV14) Save() error {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
// get config file.
configFile := getConfigFile()
// initialize quick.
qc, err := quick.New(&s)
// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig() error {
srvCfg, err := getValidConfig()
if err != nil {
return err
}
// Save config file.
return qc.Save(configFile)
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(globalActiveCred)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(globalIsBrowserEnabled)
}
if globalIsEnvRegion {
srvCfg.SetRegion(globalServerRegion)
}
// hold the mutex lock before a new config is assigned.
serverConfigMu.Lock()
serverConfig = srvCfg
if !globalIsEnvCreds {
globalActiveCred = serverConfig.GetCredential()
}
if !globalIsEnvBrowser {
globalIsBrowserEnabled = serverConfig.GetBrowser()
}
if !globalIsEnvRegion {
globalServerRegion = serverConfig.GetRegion()
}
serverConfigMu.Unlock()
return nil
}
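The precedence implemented above (environment wins over the on-disk file, and the winning source is mirrored into the globals) can be exercised roughly as follows; this is a hedged sketch that assumes the browser flag is driven by a MINIO_BROWSER variable, which this hunk does not show:
os.Setenv("MINIO_REGION", "us-west-1")
os.Setenv("MINIO_BROWSER", "off")
serverHandleEnvVars() // sets globalIsEnv* and the matching global values
fatalIf(loadConfig(), "Unable to load config.")
// serverConfig.GetRegion() == "us-west-1" and serverConfig.GetBrowser() == false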

View File

@@ -76,38 +76,41 @@ func TestServerConfig(t *testing.T) {
serverConfig.Notify.SetWebhookByID("2", webhookNotify{})
savedNotifyCfg5 := serverConfig.Notify.GetWebhookByID("2")
if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg3)
t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg5)
}
// Set new console logger.
serverConfig.Logger.SetConsole(consoleLogger{
Enable: true,
})
// Set new MySQL notification id.
serverConfig.Notify.SetMySQLByID("2", mySQLNotify{})
savedNotifyCfg6 := serverConfig.Notify.GetMySQLByID("2")
if !reflect.DeepEqual(savedNotifyCfg6, mySQLNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", mySQLNotify{}, savedNotifyCfg6)
}
consoleLogger := NewConsoleLogger()
serverConfig.Logger.SetConsole(consoleLogger)
consoleCfg := serverConfig.Logger.GetConsole()
if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
if !reflect.DeepEqual(consoleCfg, consoleLogger) {
t.Errorf("Expecting console logger config %#v found %#v", consoleLogger, consoleCfg)
}
// Set new console logger.
serverConfig.Logger.SetConsole(consoleLogger{
Enable: false,
})
consoleLogger.Enable = false
serverConfig.Logger.SetConsole(consoleLogger)
// Set new file logger.
serverConfig.Logger.SetFile(fileLogger{
Enable: true,
})
fileLogger := NewFileLogger("test-log-file")
serverConfig.Logger.SetFile(fileLogger)
fileCfg := serverConfig.Logger.GetFile()
if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, consoleCfg)
if !reflect.DeepEqual(fileCfg, fileLogger) {
t.Errorf("Expecting file logger config %#v found %#v", fileLogger, fileCfg)
}
// Set new file logger.
serverConfig.Logger.SetFile(fileLogger{
Enable: false,
})
fileLogger.Enable = false
serverConfig.Logger.SetFile(fileLogger)
// Match version.
if serverConfig.GetVersion() != v14 {
t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v14)
if serverConfig.GetVersion() != v18 {
t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v18)
}
// Attempt to save.
@@ -119,7 +122,7 @@ func TestServerConfig(t *testing.T) {
setConfigDir(rootPath)
// Initialize server config.
if err := loadConfig(envParams{}); err != nil {
if err := loadConfig(); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
@@ -135,10 +138,10 @@ func TestServerConfigWithEnvs(t *testing.T) {
os.Setenv("MINIO_SECRET_KEY", "minio123")
defer os.Unsetenv("MINIO_SECRET_KEY")
defer func() {
globalIsEnvBrowser = false
globalIsEnvCreds = false
}()
os.Setenv("MINIO_REGION", "us-west-1")
defer os.Unsetenv("MINIO_REGION")
defer resetGlobalIsEnvs()
// Get test root.
rootPath, err := getTestRoot()
@@ -146,6 +149,8 @@ func TestServerConfigWithEnvs(t *testing.T) {
t.Error(err)
}
serverHandleEnvVars()
// Do this only once here.
setConfigDir(rootPath)
@@ -156,8 +161,13 @@ func TestServerConfigWithEnvs(t *testing.T) {
defer removeAll(rootPath)
// Check if serverConfig has browser disabled
if serverConfig.GetBrowser() != "off" {
t.Errorf("Expecting browser `off` found %s", serverConfig.GetBrowser())
if serverConfig.GetBrowser() {
t.Errorf("Expecting browser is set to false found %v", serverConfig.GetBrowser())
}
// Check if serverConfig has the region from env
if serverConfig.GetRegion() != "us-west-1" {
t.Errorf("Expecting region to be \"us-west-1\" found %v", serverConfig.GetRegion())
}
// Check if serverConfig has
@@ -170,6 +180,7 @@ func TestServerConfigWithEnvs(t *testing.T) {
if cred.SecretKey != "minio123" {
t.Errorf("Expecting access key to be `minio123` found %s", cred.SecretKey)
}
}
func TestCheckDupJSONKeys(t *testing.T) {
@@ -212,7 +223,7 @@ func TestValidateConfig(t *testing.T) {
configPath := filepath.Join(rootPath, minioConfigFile)
v := v14
v := v18
testCases := []struct {
configData string
@@ -248,39 +259,63 @@ func TestValidateConfig(t *testing.T) {
// Test 10 - duplicated json keys
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
// Test 11 - Wrong Console logger level
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "logger": { "console": { "enable": true, "level": "foo" } }}`, false},
// Test 11 - empty filename field in File
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "logger": { "file": { "enable": true, "filename": "" } }}`, false},
// Test 12 - Wrong File logger level
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "logger": { "file": { "enable": true, "level": "foo" } }}`, false},
// Test 13 - Test AMQP
// Test 12 - Test AMQP
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
// Test 14 - Test NATS
// Test 13 - Test NATS
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
// Test 15 - Test ElasticSearch
// Test 14 - Test ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
// Test 16 - Test Redis
// Test 15 - Test Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
// Test 17 - Test PostgreSQL
// Test 16 - Test PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 18 - Test Kafka
// Test 17 - Test Kafka
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
// Test 19 - Test Webhook
// Test 18 - Test Webhook
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
// Test 20 - Test MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 21 - Test Format for MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
// Test 22 - Test valid Format for MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
// Test 23 - Test Format for PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
// Test 24 - Test valid Format for PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
// Test 25 - Test Format for ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},
// Test 26 - Test valid Format for ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},
// Test 27 - Test Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
// Test 28 - Test valid Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
}
for i, testCase := range testCases {
if werr := ioutil.WriteFile(configPath, []byte(testCase.configData), 0700); werr != nil {
t.Fatal(werr)
}
verr := validateConfig()
_, verr := getValidConfig()
if testCase.shouldPass && verr != nil {
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, verr)
}

74
cmd/console-logger.go Normal file
View File

@@ -0,0 +1,74 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"github.com/Sirupsen/logrus"
)
// ConsoleLogger - console logger which logs to stderr.
type ConsoleLogger struct {
BaseLogTarget
}
// Fire - log entry handler.
func (logger ConsoleLogger) Fire(entry *logrus.Entry) error {
if !logger.Enable {
return nil
}
msgBytes, err := logger.formatter.Format(entry)
if err == nil {
fmt.Fprint(os.Stderr, string(msgBytes))
}
return err
}
// String - represents ConsoleLogger as string.
func (logger ConsoleLogger) String() string {
enableStr := "disabled"
if logger.Enable {
enableStr = "enabled"
}
return fmt.Sprintf("console:%s", enableStr)
}
// NewConsoleLogger - return new console logger object.
func NewConsoleLogger() (logger ConsoleLogger) {
logger.Enable = true
logger.formatter = new(logrus.TextFormatter)
return logger
}
// InitConsoleLogger - initializes console logger.
func InitConsoleLogger(logger *ConsoleLogger) {
if !logger.Enable {
return
}
if logger.formatter == nil {
logger.formatter = new(logrus.TextFormatter)
}
return
}
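A minimal sketch of exercising the logger directly (hypothetical entry; Fire matches the logrus hook signature, though the hook registration itself is not part of this file):
logger := NewConsoleLogger()
entry := logrus.NewEntry(logrus.New())
entry.Message = "server started"
_ = logger.Fire(entry) // formats the entry and writes it to stderr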

View File

@@ -21,8 +21,6 @@ import (
"encoding/base64"
"errors"
"github.com/minio/mc/pkg/console"
"golang.org/x/crypto/bcrypt"
)
@@ -41,28 +39,6 @@ var (
)
var secretKeyMaxLen = secretKeyMaxLenAmazon
func mustGetAccessKey() string {
keyBytes := make([]byte, accessKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
console.Fatalf("Unable to generate access key. Err: %s.\n", err)
}
for i := 0; i < accessKeyMaxLen; i++ {
keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
}
return string(keyBytes)
}
func mustGetSecretKey() string {
keyBytes := make([]byte, secretKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
console.Fatalf("Unable to generate secret key. Err: %s.\n", err)
}
return string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])
}
// isAccessKeyValid - validate access key for right length.
func isAccessKeyValid(accessKey string) bool {
return len(accessKey) >= accessKeyMinLen && len(accessKey) <= accessKeyMaxLen
@@ -127,9 +103,8 @@ func createCredential(accessKey, secretKey string) (cred credential, err error)
func mustGetNewCredential() credential {
// Generate access key.
keyBytes := make([]byte, accessKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
console.Fatalln("Unable to generate access key.", err)
}
_, err := rand.Read(keyBytes)
fatalIf(err, "Unable to generate access key.")
for i := 0; i < accessKeyMaxLen; i++ {
keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
}
@@ -137,15 +112,12 @@ func mustGetNewCredential() credential {
// Generate secret key.
keyBytes = make([]byte, secretKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
console.Fatalln("Unable to generate secret key.", err)
}
_, err = rand.Read(keyBytes)
fatalIf(err, "Unable to generate secret key.")
secretKey := string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])
cred, err := createCredential(accessKey, secretKey)
if err != nil {
console.Fatalln("Unable to generate new credential.", err)
}
fatalIf(err, "Unable to generate new credential.")
return cred
}
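For reference, createCredential (shown a few lines above) can be driven directly; the values below are the fixtures used throughout the tests in this commit:
cred, err := createCredential("minio", "minio123")
fatalIf(err, "Unable to create credential.")
_ = cred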

412
cmd/endpoint.go Normal file
View File

@@ -0,0 +1,412 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net"
"net/url"
"path"
"sort"
"strconv"
"strings"
"github.com/minio/minio-go/pkg/set"
)
// EndpointType - enum for endpoint type.
type EndpointType int
const (
// PathEndpointType - path style endpoint type enum.
PathEndpointType EndpointType = iota + 1
// URLEndpointType - URL style endpoint type enum.
URLEndpointType
)
// Endpoint - any type of endpoint.
type Endpoint struct {
*url.URL
IsLocal bool
}
func (endpoint Endpoint) String() string {
if endpoint.Host == "" {
return endpoint.Path
}
return endpoint.URL.String()
}
// Type - returns type of endpoint.
func (endpoint Endpoint) Type() EndpointType {
if endpoint.Host == "" {
return PathEndpointType
}
return URLEndpointType
}
// SetHTTPS - sets secure http for URLEndpointType.
func (endpoint Endpoint) SetHTTPS() {
if endpoint.Host != "" {
endpoint.Scheme = "https"
}
}
// SetHTTP - sets insecure http for URLEndpointType.
func (endpoint Endpoint) SetHTTP() {
if endpoint.Host != "" {
endpoint.Scheme = "http"
}
}
// NewEndpoint - returns new endpoint based on given arguments.
func NewEndpoint(arg string) (Endpoint, error) {
// isEmptyPath - check whether given path is not empty.
isEmptyPath := func(path string) bool { // check whether given path is empty
return path == "" || path == "." || path == "/" || path == `\`
}
if isEmptyPath(arg) {
return Endpoint{}, fmt.Errorf("empty or root endpoint is not supported")
}
var isLocal bool
u, err := url.Parse(arg)
if err == nil && u.Host != "" {
// URL style of endpoint.
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All field should be empty except Host and Path.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format")
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
if !strings.Contains(err.Error(), "missing port in address") {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: %s", err)
}
host = u.Host
} else {
var p int
p, err = strconv.Atoi(port)
if err != nil {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: invalid port number")
} else if p < 1 || p > 65535 {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
}
}
if host == "" {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: empty host name")
}
// As this is a path in the URL, we should use the path package, not the filepath package.
// On MS Windows, filepath.Clean() converts to Windows path style, i.e. `/foo` becomes `\foo`
u.Path = path.Clean(u.Path)
if isEmptyPath(u.Path) {
return Endpoint{}, fmt.Errorf("empty or root path is not supported in URL endpoint")
}
// Get IPv4 address of the host.
hostIPs, err := getHostIP4(host)
if err != nil {
return Endpoint{}, err
}
// If intersection of two IP sets is not empty, then the host is local host.
isLocal = !localIP4.Intersection(hostIPs).IsEmpty()
} else {
u = &url.URL{Path: path.Clean(arg)}
isLocal = true
}
return Endpoint{
URL: u,
IsLocal: isLocal,
}, nil
}
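Two representative calls, with expected results taken from the test table later in this commit (the URL form requires the host to resolve):
ep, err := NewEndpoint("/d1")
fatalIf(err, "Unable to parse endpoint.")
// ep.Type() == PathEndpointType, ep.IsLocal == true
ep, err = NewEndpoint("http://localhost:9000/d1")
fatalIf(err, "Unable to parse endpoint.")
// ep.Type() == URLEndpointType, ep.IsLocal == true
_ = ep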
// EndpointList - list of same type of endpoint.
type EndpointList []Endpoint
// Swap - helper method for sorting.
func (endpoints EndpointList) Swap(i, j int) {
endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
}
// Len - helper method for sorting.
func (endpoints EndpointList) Len() int {
return len(endpoints)
}
// Less - helper method for sorting.
func (endpoints EndpointList) Less(i, j int) bool {
return endpoints[i].String() < endpoints[j].String()
}
// SetHTTPS - sets secure http for URLEndpointType.
func (endpoints EndpointList) SetHTTPS() {
for i := range endpoints {
endpoints[i].SetHTTPS()
}
}
// SetHTTP - sets insecure http for URLEndpointType.
func (endpoints EndpointList) SetHTTP() {
for i := range endpoints {
endpoints[i].SetHTTP()
}
}
// NewEndpointList - returns new endpoint list based on input args.
func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
// isValidDistribution - checks whether given count is a valid distribution for erasure coding.
isValidDistribution := func(count int) bool {
return (count >= minErasureBlocks && count <= maxErasureBlocks && count%2 == 0)
}
// Check whether the number of args is valid for XL distribution.
if !isValidDistribution(len(args)) {
return nil, fmt.Errorf("A total of %d endpoints were found. For erasure mode it should be an even number between %d and %d", len(args), minErasureBlocks, maxErasureBlocks)
}
var endpointType EndpointType
var scheme string
uniqueArgs := set.NewStringSet()
// Loop through args and add them to the endpoint list.
for i, arg := range args {
endpoint, err := NewEndpoint(arg)
if err != nil {
return nil, fmt.Errorf("'%s': %s", arg, err.Error())
}
// All endpoints have to be of the same type and, if applicable, the same scheme.
if i == 0 {
endpointType = endpoint.Type()
scheme = endpoint.Scheme
} else if endpoint.Type() != endpointType {
return nil, fmt.Errorf("mixed style endpoints are not supported")
} else if endpoint.Scheme != scheme {
return nil, fmt.Errorf("mixed scheme is not supported")
}
arg = endpoint.String()
if uniqueArgs.Contains(arg) {
return nil, fmt.Errorf("duplicate endpoints found")
}
uniqueArgs.Add(arg)
endpoints = append(endpoints, endpoint)
}
sort.Sort(endpoints)
return endpoints, nil
}
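Driven with the fixtures from the tests in this commit, a valid four-disk list looks like this (a sketch; the 4..16 bounds come from the test expectations):
endpoints, err := NewEndpointList("d1", "d2", "d3", "d4")
fatalIf(err, "Unable to create endpoint list.")
// endpoints comes back sorted; duplicates, mixed styles or an invalid count would have errored
_ = endpoints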
// CreateEndpoints - validates and creates new endpoints for given args.
func CreateEndpoints(serverAddr string, args ...string) (string, EndpointList, SetupType, error) {
var endpoints EndpointList
var setupType SetupType
var err error
// Check whether serverAddr is valid for this host.
if err = CheckLocalServerAddr(serverAddr); err != nil {
return serverAddr, endpoints, setupType, err
}
_, serverAddrPort := mustSplitHostPort(serverAddr)
// For single arg, return FS setup.
if len(args) == 1 {
var endpoint Endpoint
endpoint, err = NewEndpoint(args[0])
if err != nil {
return serverAddr, endpoints, setupType, err
}
if endpoint.Type() != PathEndpointType {
return serverAddr, endpoints, setupType, fmt.Errorf("use path style endpoint for FS setup")
}
endpoints = append(endpoints, endpoint)
setupType = FSSetupType
return serverAddr, endpoints, setupType, nil
}
// Convert args to endpoints
if endpoints, err = NewEndpointList(args...); err != nil {
return serverAddr, endpoints, setupType, err
}
// Return XL setup when all endpoints are path style.
if endpoints[0].Type() == PathEndpointType {
setupType = XLSetupType
return serverAddr, endpoints, setupType, nil
}
// Here all endpoints are URL style.
endpointPathSet := set.NewStringSet()
localEndpointCount := 0
localServerAddrSet := set.NewStringSet()
localPortSet := set.NewStringSet()
for _, endpoint := range endpoints {
endpointPathSet.Add(endpoint.Path)
if endpoint.IsLocal {
localServerAddrSet.Add(endpoint.Host)
var port string
_, port, err = net.SplitHostPort(endpoint.Host)
if err != nil {
port = serverAddrPort
}
localPortSet.Add(port)
localEndpointCount++
}
}
// No local endpoint found.
if localEndpointCount == 0 {
return serverAddr, endpoints, setupType, fmt.Errorf("no endpoint found for this host")
}
// Check that the same path is not used by multiple endpoints on the same host.
{
pathIPMap := make(map[string]set.StringSet)
for _, endpoint := range endpoints {
var host string
host, _, err = net.SplitHostPort(endpoint.Host)
if err != nil {
host = endpoint.Host
}
hostIPSet, _ := getHostIP4(host)
if IPSet, ok := pathIPMap[endpoint.Path]; ok {
if !IPSet.Intersection(hostIPSet).IsEmpty() {
err = fmt.Errorf("path '%s' can not be served by different port on same address", endpoint.Path)
return serverAddr, endpoints, setupType, err
}
pathIPMap[endpoint.Path] = IPSet.Union(hostIPSet)
} else {
pathIPMap[endpoint.Path] = hostIPSet
}
}
}
// Check whether serverAddrPort matches at least one of the ports used in local endpoints.
{
if !localPortSet.Contains(serverAddrPort) {
if len(localPortSet) > 1 {
err = fmt.Errorf("port number in server address must match with one of the port in local endpoints")
} else {
err = fmt.Errorf("server address and local endpoint have different ports")
}
return serverAddr, endpoints, setupType, err
}
}
// All endpoints are pointing to local host
if len(endpoints) == localEndpointCount {
// If all endpoints have same port number, then this is XL setup using URL style endpoints.
if len(localPortSet) == 1 {
if len(localServerAddrSet) > 1 {
// TODO: Even though all endpoints are local, the local host is referred to by different IPs/names.
// e.g. '172.0.0.1', 'localhost' and 'mylocalhostname' point to the same local host.
//
// In this case, we bind to 0.0.0.0, i.e. to all interfaces.
// The proper fix is to bind only to the IPs in uniqueLocalHosts.
serverAddr = net.JoinHostPort("", serverAddrPort)
}
endpointPaths := endpointPathSet.ToSlice()
endpoints, _ = NewEndpointList(endpointPaths...)
setupType = XLSetupType
return serverAddr, endpoints, setupType, nil
}
// Even though all endpoints are local, they use different ports.
// This means it is a DistXL setup.
} else {
// This is DistXL setup.
// Check whether local server addresses are not 127.x.x.x
for _, localServerAddr := range localServerAddrSet.ToSlice() {
host, _, err := net.SplitHostPort(localServerAddr)
if err != nil {
host = localServerAddr
}
ipList, err := getHostIP4(host)
fatalIf(err, "unexpected error when resolving host '%s'", host)
// Filter ipList for IPs that start with '127.'.
loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {
return strings.HasPrefix(ip, "127.")
}, "")
// If loop back IP is found and ipList contains only loop back IPs, then error out.
if len(loopBackIPs) > 0 && len(loopBackIPs) == len(ipList) {
err = fmt.Errorf("'%s' resolves to loopback address is not allowed for distributed XL", localServerAddr)
return serverAddr, endpoints, setupType, err
}
}
}
// Add missing port in all endpoints.
for i := range endpoints {
_, port, err := net.SplitHostPort(endpoints[i].Host)
if err != nil {
endpoints[i].Host = net.JoinHostPort(endpoints[i].Host, serverAddrPort)
} else if endpoints[i].IsLocal && serverAddrPort != port {
// If endpoint is local, but its port is different from serverAddrPort, then mark it as remote.
endpoints[i].IsLocal = false
}
}
setupType = DistXLSetupType
return serverAddr, endpoints, setupType, nil
}
// GetRemotePeers - get the host information of peers other than this minio service.
func GetRemotePeers(endpoints EndpointList) []string {
peerSet := set.NewStringSet()
for _, endpoint := range endpoints {
if endpoint.Type() != URLEndpointType {
continue
}
peer := endpoint.Host
if endpoint.IsLocal {
if _, port := mustSplitHostPort(peer); port == globalMinioPort {
continue
}
}
peerSet.Add(peer)
}
return peerSet.ToSlice()
}
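Putting it together, a hedged sketch grounded in the test table below (four local path-style args classify as an XL setup):
serverAddr, endpoints, setupType, err := CreateEndpoints(":1234", "/d1", "/d2", "d3", "d4")
fatalIf(err, "Invalid endpoint arguments.")
// setupType == XLSetupType; GetRemotePeers(endpoints) is empty since no endpoint is URL style
_, _, _ = serverAddr, endpoints, setupType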

372
cmd/endpoint_test.go Normal file
View File

@@ -0,0 +1,372 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/url"
"reflect"
"runtime"
"sort"
"strings"
"testing"
)
func TestNewEndpoint(t *testing.T) {
u1, _ := url.Parse("http://localhost/path")
u2, _ := url.Parse("https://example.org/path")
u3, _ := url.Parse("http://127.0.0.1:8080/path")
u4, _ := url.Parse("http://192.168.253.200/path")
errMsg := ": no such host"
if runtime.GOOS == "windows" {
errMsg = ": No such host is known."
}
testCases := []struct {
arg string
expectedEndpoint Endpoint
expectedType EndpointType
expectedErr error
}{
{"foo", Endpoint{URL: &url.URL{Path: "foo"}, IsLocal: true}, PathEndpointType, nil},
{"/foo", Endpoint{URL: &url.URL{Path: "/foo"}, IsLocal: true}, PathEndpointType, nil},
{`\foo`, Endpoint{URL: &url.URL{Path: `\foo`}, IsLocal: true}, PathEndpointType, nil},
{"C", Endpoint{URL: &url.URL{Path: `C`}, IsLocal: true}, PathEndpointType, nil},
{"C:", Endpoint{URL: &url.URL{Path: `C:`}, IsLocal: true}, PathEndpointType, nil},
{"C:/", Endpoint{URL: &url.URL{Path: "C:"}, IsLocal: true}, PathEndpointType, nil},
{`C:\`, Endpoint{URL: &url.URL{Path: `C:\`}, IsLocal: true}, PathEndpointType, nil},
{`C:\foo`, Endpoint{URL: &url.URL{Path: `C:\foo`}, IsLocal: true}, PathEndpointType, nil},
{"C:/foo", Endpoint{URL: &url.URL{Path: "C:/foo"}, IsLocal: true}, PathEndpointType, nil},
{`C:\\foo`, Endpoint{URL: &url.URL{Path: `C:\\foo`}, IsLocal: true}, PathEndpointType, nil},
{"http:path", Endpoint{URL: &url.URL{Path: "http:path"}, IsLocal: true}, PathEndpointType, nil},
{"http:/path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
{"http:///path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
{"http://localhost/path", Endpoint{URL: u1, IsLocal: true}, URLEndpointType, nil},
{"http://localhost/path//", Endpoint{URL: u1, IsLocal: true}, URLEndpointType, nil},
{"https://example.org/path", Endpoint{URL: u2}, URLEndpointType, nil},
{"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true}, URLEndpointType, nil},
{"http://192.168.253.200/path", Endpoint{URL: u4}, URLEndpointType, nil},
{"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
{".", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
{"/", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
{`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
{"c://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
{"ftp://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
{"http://server/path?location", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
{"http://:/path", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format: invalid port number")},
{"http://:8080/path", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format: empty host name")},
{"http://server:/path", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format: invalid port number")},
{"https://93.184.216.34:808080/path", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")},
{"http://server:8080//", Endpoint{}, -1, fmt.Errorf("empty or root path is not supported in URL endpoint")},
{"http://server:8080/", Endpoint{}, -1, fmt.Errorf("empty or root path is not supported in URL endpoint")},
{"http://server/path", Endpoint{}, -1, fmt.Errorf("lookup server" + errMsg)},
}
for _, testCase := range testCases {
endpoint, err := NewEndpoint(testCase.arg)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else {
var match bool
if strings.HasSuffix(testCase.expectedErr.Error(), errMsg) {
match = strings.HasSuffix(err.Error(), errMsg)
} else {
match = (testCase.expectedErr.Error() == err.Error())
}
if !match {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
if err == nil && !reflect.DeepEqual(testCase.expectedEndpoint, endpoint) {
t.Fatalf("endpoint: expected = %+v, got = %+v", testCase.expectedEndpoint, endpoint)
}
if err == nil && testCase.expectedType != endpoint.Type() {
t.Fatalf("type: expected = %+v, got = %+v", testCase.expectedType, endpoint.Type())
}
}
}
func TestNewEndpointList(t *testing.T) {
testCases := []struct {
args []string
expectedErr error
}{
{[]string{"d1", "d2", "d3", "d4"}, nil},
{[]string{"/d1", "/d2", "/d3", "/d4"}, nil},
{[]string{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}, nil},
{[]string{"http://example.org/d1", "http://example.com/d1", "http://example.net/d1", "http://example.edu/d1"}, nil},
{[]string{"http://localhost/d1", "http://localhost/d2", "http://example.org/d1", "http://example.org/d2"}, nil},
{[]string{"https://localhost:9000/d1", "https://localhost:9001/d2", "https://localhost:9002/d3", "https://localhost:9003/d4"}, nil},
// It is valid, with respect to the endpoint list, for the same path to be used with different ports on the same server.
{[]string{"https://127.0.0.1:9000/d1", "https://127.0.0.1:9001/d1", "https://127.0.0.1:9002/d1", "https://127.0.0.1:9003/d1"}, nil},
{[]string{"d1", "d2", "d3", "d1"}, fmt.Errorf("duplicate endpoints found")},
{[]string{"d1", "d2", "d3", "./d1"}, fmt.Errorf("duplicate endpoints found")},
{[]string{"http://localhost/d1", "http://localhost/d2", "http://localhost/d1", "http://localhost/d4"}, fmt.Errorf("duplicate endpoints found")},
{[]string{"d1", "d2", "d3", "d4", "d5"}, fmt.Errorf("A total of 5 endpoints were found. For erasure mode it should be an even number between 4 and 16")},
{[]string{"ftp://server/d1", "http://server/d2", "http://server/d3", "http://server/d4"}, fmt.Errorf("'ftp://server/d1': invalid URL endpoint format")},
{[]string{"d1", "http://localhost/d2", "d3", "d4"}, fmt.Errorf("mixed style endpoints are not supported")},
{[]string{"http://example.org/d1", "https://example.com/d1", "http://example.net/d1", "https://example.edut/d1"}, fmt.Errorf("mixed scheme is not supported")},
}
for _, testCase := range testCases {
_, err := NewEndpointList(testCase.args...)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
}
func TestCreateEndpoints(t *testing.T) {
// Filter ipList for IPs that do not start with '127.'.
nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
return !strings.HasPrefix(ip, "127.")
}, "")
if len(nonLoopBackIPs) == 0 {
t.Fatalf("No non-loop back IP address found for this host")
}
nonLoopBackIP := nonLoopBackIPs.ToSlice()[0]
getExpectedEndpoints := func(args []string, prefix string) ([]*url.URL, []bool) {
var URLs []*url.URL
var localFlags []bool
sort.Strings(args)
for _, arg := range args {
u, _ := url.Parse(arg)
URLs = append(URLs, u)
localFlags = append(localFlags, strings.HasPrefix(arg, prefix))
}
return URLs, localFlags
}
case1Endpoint1 := "http://" + nonLoopBackIP + "/d1"
case1Endpoint2 := "http://" + nonLoopBackIP + "/d2"
args := []string{
"http://" + nonLoopBackIP + ":10000/d1",
"http://" + nonLoopBackIP + ":10000/d2",
"http://example.com:10000/d4",
"http://example.org:10000/d3",
}
case1URLs, case1LocalFlags := getExpectedEndpoints(args, "http://"+nonLoopBackIP+":10000/")
case2Endpoint1 := "http://" + nonLoopBackIP + "/d1"
case2Endpoint2 := "http://" + nonLoopBackIP + ":9000/d2"
args = []string{
"http://" + nonLoopBackIP + ":10000/d1",
"http://" + nonLoopBackIP + ":9000/d2",
"http://example.com:10000/d4",
"http://example.org:10000/d3",
}
case2URLs, case2LocalFlags := getExpectedEndpoints(args, "http://"+nonLoopBackIP+":10000/")
case3Endpoint1 := "http://" + nonLoopBackIP + "/d1"
args = []string{
"http://" + nonLoopBackIP + ":80/d1",
"http://example.com:80/d3",
"http://example.net:80/d4",
"http://example.org:9000/d2",
}
case3URLs, case3LocalFlags := getExpectedEndpoints(args, "http://"+nonLoopBackIP+":80/")
case4Endpoint1 := "http://" + nonLoopBackIP + "/d1"
args = []string{
"http://" + nonLoopBackIP + ":9000/d1",
"http://example.com:9000/d3",
"http://example.net:9000/d4",
"http://example.org:9000/d2",
}
case4URLs, case4LocalFlags := getExpectedEndpoints(args, "http://"+nonLoopBackIP+":9000/")
case5Endpoint1 := "http://" + nonLoopBackIP + ":9000/d1"
case5Endpoint2 := "http://" + nonLoopBackIP + ":9001/d2"
case5Endpoint3 := "http://" + nonLoopBackIP + ":9002/d3"
case5Endpoint4 := "http://" + nonLoopBackIP + ":9003/d4"
args = []string{
case5Endpoint1,
case5Endpoint2,
case5Endpoint3,
case5Endpoint4,
}
case5URLs, case5LocalFlags := getExpectedEndpoints(args, "http://"+nonLoopBackIP+":9000/")
case6Endpoint := "http://" + nonLoopBackIP + ":9003/d4"
args = []string{
"http://localhost:9000/d1",
"http://localhost:9001/d2",
"http://127.0.0.1:9002/d3",
case6Endpoint,
}
case6URLs, case6LocalFlags := getExpectedEndpoints(args, "http://"+nonLoopBackIP+":9003/")
testCases := []struct {
serverAddr string
args []string
expectedServerAddr string
expectedEndpoints EndpointList
expectedSetupType SetupType
expectedErr error
}{
{"localhost", []string{}, "", EndpointList{}, -1, fmt.Errorf("missing port in address localhost")},
// FS Setup
{"localhost:9000", []string{"http://localhost/d1"}, "", EndpointList{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
{":443", []string{"d1"}, ":443", EndpointList{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", []string{"/d1"}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", []string{"./d1"}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", []string{`\d1`}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: `\d1`}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", []string{`.\d1`}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: `.\d1`}, IsLocal: true}}, FSSetupType, nil},
{":8080", []string{"https://example.org/d1", "https://example.org/d2", "https://example.org/d3", "https://example.org/d4"}, "", EndpointList{}, -1, fmt.Errorf("no endpoint found for this host")},
{":8080", []string{"https://example.org/d1", "https://example.com/d2", "https://example.net:8000/d3", "https://example.edu/d1"}, "", EndpointList{}, -1, fmt.Errorf("no endpoint found for this host")},
{"localhost:9000", []string{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}, "", EndpointList{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
{"localhost:9000", []string{"https://127.0.0.1:8000/d1", "https://localhost:9001/d2", "https://example.com/d1", "https://example.com/d2"}, "", EndpointList{}, -1, fmt.Errorf("port number in server address must match with one of the port in local endpoints")},
{"localhost:10000", []string{"https://127.0.0.1:8000/d1", "https://localhost:8000/d2", "https://example.com/d1", "https://example.com/d2"}, "", EndpointList{}, -1, fmt.Errorf("server address and local endpoint have different ports")},
// XL Setup with PathEndpointType
{":1234", []string{"/d1", "/d2", "d3", "d4"}, ":1234",
EndpointList{
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "d4"}, IsLocal: true},
}, XLSetupType, nil},
// XL Setup with URLEndpointType
{":9000", []string{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}, ":9000", EndpointList{
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
}, XLSetupType, nil},
// XL Setup with URLEndpointType having mixed naming to local host.
{"127.0.0.1:10000", []string{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}, ":10000", EndpointList{
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
}, XLSetupType, nil},
{":9001", []string{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}, "", EndpointList{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},
{":9000", []string{"http://localhost/d1", "http://localhost/d2", "http://example.org/d3", "http://example.com/d4"}, "", EndpointList{}, -1, fmt.Errorf("'localhost' resolves to loopback address is not allowed for distributed XL")},
// DistXL type
{"127.0.0.1:10000", []string{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}, "127.0.0.1:10000", EndpointList{
Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]},
Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]},
Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]},
Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]},
}, DistXLSetupType, nil},
{"127.0.0.1:10000", []string{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}, "127.0.0.1:10000", EndpointList{
Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]},
Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]},
Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]},
Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]},
}, DistXLSetupType, nil},
{":80", []string{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}, ":80", EndpointList{
Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]},
Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]},
Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]},
Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]},
}, DistXLSetupType, nil},
{":9000", []string{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}, ":9000", EndpointList{
Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]},
Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]},
Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]},
Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]},
}, DistXLSetupType, nil},
{":9000", []string{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}, ":9000", EndpointList{
Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]},
Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]},
Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]},
Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]},
}, DistXLSetupType, nil},
// DistXL Setup using only local host.
{":9003", []string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}, ":9003", EndpointList{
Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]},
Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]},
Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]},
Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]},
}, DistXLSetupType, nil},
}
for _, testCase := range testCases {
serverAddr, endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
if err == nil {
if testCase.expectedErr != nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else {
if serverAddr != testCase.expectedServerAddr {
t.Fatalf("serverAddr: expected = %v, got = %v", testCase.expectedServerAddr, serverAddr)
}
if !reflect.DeepEqual(endpoints, testCase.expectedEndpoints) {
t.Fatalf("endpoints: expected = %v, got = %v", testCase.expectedEndpoints, endpoints)
}
if setupType != testCase.expectedSetupType {
t.Fatalf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
}
}
} else if testCase.expectedErr == nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
} else if err.Error() != testCase.expectedErr.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
}
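The table above exercises CreateEndpoints across FS, XL, and distributed-XL layouts. For orientation, here is a sketch of a direct call for a four-node distributed setup (the hostnames are invented for illustration): the function returns the resolved server address, the endpoint list with IsLocal flags set, and the detected setup type.
	serverAddr, endpoints, setupType, err := CreateEndpoints(":9000",
		"http://node1:9000/export", "http://node2:9000/export",
		"http://node3:9000/export", "http://node4:9000/export")
	if err != nil {
		// e.g. "no endpoint found for this host" when no endpoint
		// resolves to an IP on this machine.
		return err
	}
	// On the matching node: serverAddr == ":9000", setupType ==
	// DistXLSetupType, and exactly one entry in endpoints has IsLocal set.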
func TestGetRemotePeers(t *testing.T) {
tempGlobalMinioPort := globalMinioPort
defer func() {
globalMinioPort = tempGlobalMinioPort
}()
globalMinioPort = "9000"
testCases := []struct {
endpointArgs []string
expectedResult []string
}{
{[]string{"/d1", "/d2", "d3", "d4"}, []string{}},
{[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"}, []string{"example.com:9000", "example.org:9000"}},
{[]string{"http://localhost:9000/d1", "http://localhost:10000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"}, []string{"example.com:9000", "example.org:9000", "localhost:10000"}},
{[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"}, []string{"example.com:9000", "example.net:9000", "example.org:9000"}},
{[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"}, []string{"localhost:9001", "localhost:9002", "localhost:9003"}},
}
for _, testCase := range testCases {
endpoints, _ := NewEndpointList(testCase.endpointArgs...)
remotePeers := GetRemotePeers(endpoints)
if !reflect.DeepEqual(remotePeers, testCase.expectedResult) {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, remotePeers)
}
}
}
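To make the expected behavior concrete, here is the fourth test case above written out as a direct call; GetRemotePeers drops the endpoints that resolve to this host and returns the rest as sorted host:port strings.
	endpoints, _ := NewEndpointList(
		"http://localhost:9000/d1", "http://example.org:9000/d2",
		"http://example.com:9000/d3", "http://example.net:9000/d4")
	peers := GetRemotePeers(endpoints)
	// peers == []string{"example.com:9000", "example.net:9000", "example.org:9000"}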

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@ import (
"bytes"
"math/rand"
"testing"
"time"
"reflect"
@@ -195,11 +194,7 @@ func TestErasureReadUtils(t *testing.T) {
if err != nil {
t.Fatal(err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
objLayer, _, err := initObjectLayer(endpoints)
objLayer, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
removeRoots(disks)
t.Fatal(err)
@@ -412,7 +407,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
}
// To generate random offset/length.
r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
r := rand.New(rand.NewSource(UTCNow().UnixNano()))
// create pool buffer which will be used by erasureReadFile for
// reading from disks and erasure decoding.

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,11 +25,14 @@ import (
"net/url"
"path"
"sync"
"time"
"github.com/Sirupsen/logrus"
)
const (
minioEventSource = "minio:s3"
)
type externalNotifier struct {
// Per-bucket notification config. This is updated via
// PutBucketNotification API.
@@ -83,11 +86,29 @@ type eventData struct {
Bucket string
ObjInfo ObjectInfo
ReqParams map[string]string
Host string
Port string
UserAgent string
}
// newNotificationEvent constructs a new notification event message from
// input request metadata for a request that completed successfully.
func newNotificationEvent(event eventData) NotificationEvent {
getResponseOriginEndpointKey := func() string {
host := globalMinioHost
if host == "" {
// FIXME: Send the FQDN or hostname of this machine rather than its IP address.
host = localIP4.ToSlice()[0]
}
scheme := httpScheme
if globalIsSSL {
scheme = httpsScheme
}
return fmt.Sprintf("%s://%s:%s", scheme, host, globalMinioPort)
}
// Fetch the region.
region := serverConfig.GetRegion()
@@ -95,15 +116,7 @@ func newNotificationEvent(event eventData) NotificationEvent {
creds := serverConfig.GetCredential()
// Time when Minio finished processing the request.
eventTime := time.Now().UTC()
// API endpoint is captured here to be returned back
// to the client for it to differentiate from which
// server the request came from.
var apiEndpoint string
if len(globalAPIEndpoints) >= 1 {
apiEndpoint = globalAPIEndpoints[0]
}
eventTime := UTCNow()
// Fetch a hexadecimal representation of event time in nanoseconds.
uniqueID := mustGetRequestID(eventTime)
@@ -115,7 +128,7 @@ func newNotificationEvent(event eventData) NotificationEvent {
// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
nEvent := NotificationEvent{
EventVersion: eventVersion,
EventSource: eventSource,
EventSource: minioEventSource,
AwsRegion: region,
EventTime: eventTime.Format(timeFormatAMZ),
EventName: event.Type.String(),
@@ -125,7 +138,7 @@ func newNotificationEvent(event eventData) NotificationEvent {
responseRequestIDKey: uniqueID,
// Following is a custom response element to indicate
// event origin server endpoint.
responseOriginEndpointKey: apiEndpoint,
responseOriginEndpointKey: getResponseOriginEndpointKey(),
},
S3: eventMeta{
SchemaVersion: eventSchemaVersion,
@@ -136,6 +149,11 @@ func newNotificationEvent(event eventData) NotificationEvent {
ARN: bucketARNPrefix + event.Bucket,
},
},
Source: sourceInfo{
Host: event.Host,
Port: event.Port,
UserAgent: event.UserAgent,
},
}
// Escape the object name. For example "red flower.jpg" becomes "red+flower.jpg".
@@ -145,6 +163,7 @@ func newNotificationEvent(event eventData) NotificationEvent {
if event.Type == ObjectRemovedDelete {
nEvent.S3.Object = objectMeta{
Key: escapedObj,
VersionID: "1",
Sequencer: uniqueID,
}
return nEvent
@@ -152,10 +171,13 @@ func newNotificationEvent(event eventData) NotificationEvent {
// For all other events we should set ETag and Size.
nEvent.S3.Object = objectMeta{
Key: escapedObj,
ETag: event.ObjInfo.MD5Sum,
Size: event.ObjInfo.Size,
Sequencer: uniqueID,
Key: escapedObj,
ETag: event.ObjInfo.MD5Sum,
Size: event.ObjInfo.Size,
ContentType: event.ObjInfo.ContentType,
UserDefined: event.ObjInfo.UserDefined,
VersionID: "1",
Sequencer: uniqueID,
}
// Success.
@@ -630,7 +652,6 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
if !webhookN.Enable {
continue
}
if _, err := addQueueTarget(queueTargets, accountID, queueTypeWebhook, newWebhookNotify); err != nil {
return nil, err
}
@@ -674,6 +695,25 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
}
}
// Load MySQL targets, initialize their respective loggers.
for accountID, msqlN := range serverConfig.Notify.GetMySQL() {
if !msqlN.Enable {
continue
}
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMySQL, newMySQLNotify); err != nil {
if _, ok := err.(net.Error); ok {
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
}
// Load Kafka targets, initialize their respective loggers.
for accountID, kafkaN := range serverConfig.Notify.GetKafka() {
if !kafkaN.Enable {

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"fmt"
"net"
"reflect"
"testing"
"time"
@@ -40,11 +39,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
t.Fatal("Unable to create directories for FS backend. ", err)
}
defer removeRoots(disks)
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -97,11 +92,7 @@ func TestInitEventNotifierWithPostgreSQL(t *testing.T) {
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
fs, _, err := initObjectLayer(endpoints)
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -128,11 +119,7 @@ func TestInitEventNotifierWithNATS(t *testing.T) {
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
fs, _, err := initObjectLayer(endpoints)
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -159,11 +146,7 @@ func TestInitEventNotifierWithWebHook(t *testing.T) {
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
fs, _, err := initObjectLayer(endpoints)
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -190,11 +173,7 @@ func TestInitEventNotifierWithAMQP(t *testing.T) {
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
fs, _, err := initObjectLayer(endpoints)
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -221,11 +200,7 @@ func TestInitEventNotifierWithElasticSearch(t *testing.T) {
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
fs, _, err := initObjectLayer(endpoints)
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -252,11 +227,7 @@ func TestInitEventNotifierWithRedis(t *testing.T) {
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatal(err)
}
fs, _, err := initObjectLayer(endpoints)
fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
if err != nil {
t.Fatal("Unable to initialize FS backend.", err)
}
@@ -276,15 +247,10 @@ func (s *TestPeerRPCServerData) Setup(t *testing.T) {
s.testServer = StartTestPeersRPCServer(t, s.serverType)
// setup port and minio addr
host, port, err := net.SplitHostPort(s.testServer.Server.Listener.Addr().String())
if err != nil {
t.Fatalf("Initialisation error: %v", err)
}
host, port := mustSplitHostPort(s.testServer.Server.Listener.Addr().String())
globalMinioHost = host
globalMinioPort = port
globalMinioAddr = getLocalAddress(
s.testServer.SrvCmdCfg,
)
globalMinioAddr = getEndpointsLocalAddr(s.testServer.endpoints)
// initialize the peer client(s)
initGlobalS3Peers(s.testServer.Disks)
@@ -557,7 +523,7 @@ func TestAddRemoveBucketListenerConfig(t *testing.T) {
}
// Add a topicConfig to an empty notificationConfig.
accountID := fmt.Sprintf("%d", time.Now().UTC().UnixNano())
accountID := fmt.Sprintf("%d", UTCNow().UnixNano())
accountARN := fmt.Sprintf(
"arn:minio:sqs:%s:%s:listen-%s",
serverConfig.GetRegion(),

cmd/file-logger.go Normal file
View File

@@ -0,0 +1,86 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"github.com/Sirupsen/logrus"
)
// FileLogger - file logger which logs to a file.
type FileLogger struct {
BaseLogTarget
Filename string `json:"filename"`
file *os.File
}
// Fire - log entry handler.
func (logger FileLogger) Fire(entry *logrus.Entry) (err error) {
if !logger.Enable {
return nil
}
msgBytes, err := logger.formatter.Format(entry)
if err != nil {
return err
}
if _, err = logger.file.Write(msgBytes); err != nil {
return err
}
err = logger.file.Sync()
return err
}
// String - represents FileLogger as string.
func (logger FileLogger) String() string {
enableStr := "disabled"
if logger.Enable {
enableStr = "enabled"
}
return fmt.Sprintf("file:%s:%s", enableStr, logger.Filename)
}
// NewFileLogger - creates new file logger object.
func NewFileLogger(filename string) (logger FileLogger) {
logger.Enable = true
logger.formatter = new(logrus.JSONFormatter)
logger.Filename = filename
return logger
}
// InitFileLogger - initializes file logger.
func InitFileLogger(logger *FileLogger) (err error) {
if !logger.Enable {
return err
}
if logger.formatter == nil {
logger.formatter = new(logrus.JSONFormatter)
}
if logger.file == nil {
logger.file, err = os.OpenFile(logger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0664)
}
return err
}
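A minimal usage sketch, not part of this commit: construct and initialize the logger, then attach it as a logrus hook. This assumes BaseLogTarget supplies the Levels() method that the logrus Hook interface requires alongside Fire() above; the log path is invented.
	fileLogger := NewFileLogger("/var/log/minio.json")
	if err := InitFileLogger(&fileLogger); err != nil {
		return err
	}
	logger := logrus.New()
	// Every entry logged through `logger` is now appended to the file as JSON.
	logger.Hooks.Add(fileLogger)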

View File

@@ -768,31 +768,39 @@ func loadFormatXL(bootstrapDisks []StorageAPI, readQuorum int) (disks []StorageA
return reorderDisks(bootstrapDisks, formatConfigs)
}
func checkFormatXLValues(formatConfigs []*formatConfigV1) error {
for _, formatXL := range formatConfigs {
if formatXL == nil {
continue
}
// Validate format version and format type.
if formatXL.Version != "1" {
return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version)
}
if formatXL.Format != "xl" {
return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format)
}
if formatXL.XL.Version != "1" {
return fmt.Errorf("Unsupported XL backend format found [%s]", formatXL.XL.Version)
}
if len(formatConfigs) != len(formatXL.XL.JBOD) {
return fmt.Errorf("Number of disks %d did not match the backend format %d", len(formatConfigs), len(formatXL.XL.JBOD))
}
func checkFormatXLValue(formatXL *formatConfigV1) error {
// Validate format version and format type.
if formatXL.Version != "1" {
return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version)
}
if formatXL.Format != "xl" {
return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format)
}
if formatXL.XL.Version != "1" {
return fmt.Errorf("Unsupported XL backend format found [%s]", formatXL.XL.Version)
}
return nil
}
func checkFormatXLValues(formatConfigs []*formatConfigV1) (int, error) {
for i, formatXL := range formatConfigs {
if formatXL == nil {
continue
}
if err := checkFormatXLValue(formatXL); err != nil {
return i, err
}
if len(formatConfigs) != len(formatXL.XL.JBOD) {
return i, fmt.Errorf("Number of disks %d did not match the backend format %d",
len(formatConfigs), len(formatXL.XL.JBOD))
}
}
return -1, nil
}
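With the refactor above, checkFormatXLValues now reports which disk failed validation. A sketch of how a caller could surface that index (illustrative, not from this diff):
	if i, err := checkFormatXLValues(formatConfigs); err != nil {
		// i identifies the offending disk; the function returns -1 only
		// on success.
		return fmt.Errorf("format config on disk %d: %v", i, err)
	}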
// checkFormatXL - verifies if format.json format is intact.
func checkFormatXL(formatConfigs []*formatConfigV1) error {
if err := checkFormatXLValues(formatConfigs); err != nil {
if _, err := checkFormatXLValues(formatConfigs); err != nil {
return err
}
if err := checkJBODConsistency(formatConfigs); err != nil {

View File

@@ -273,12 +273,8 @@ func TestFormatXLHealFreshDisks(t *testing.T) {
if err != nil {
t.Fatal(err)
}
endpoints, err := parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend.
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Error(err)
}
@@ -309,12 +305,8 @@ func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) {
if err != nil {
t.Fatal(err)
}
endpoints, err := parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend.
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Error(err)
}
@@ -596,12 +588,8 @@ func TestInitFormatXLErrors(t *testing.T) {
t.Fatal(err)
}
defer removeRoots(fsDirs)
endpoints, err := parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend.
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -702,12 +690,8 @@ func TestLoadFormatXLErrs(t *testing.T) {
}
defer removeRoots(fsDirs)
endpoints, err := parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend.
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -733,11 +717,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
}
defer removeRoots(fsDirs)
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -761,11 +741,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
}
defer removeRoots(fsDirs)
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -787,11 +763,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
}
defer removeRoots(fsDirs)
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -820,13 +792,8 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err := parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Everything is fine, should return nil
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -842,13 +809,8 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Disks 0..15 are nil
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -866,13 +828,8 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// One disk returns Faulty Disk
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -892,13 +849,8 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// One disk is not found, heal corrupted disks should return nil
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -914,13 +866,8 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Remove format.json of all disks
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -940,13 +887,8 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Corrupted format json in one disk
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -976,13 +918,8 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err := parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Everything is fine, should return nil
obj, _, err := initObjectLayer(endpoints)
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -997,13 +934,8 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Disks 0..15 are nil
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -1021,13 +953,8 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// One disk returns Faulty Disk
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -1047,13 +974,8 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// One disk is not found, heal corrupted disks should return nil
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}
@@ -1069,13 +991,8 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
t.Fatal(err)
}
endpoints, err = parseStorageEndpoints(fsDirs)
if err != nil {
t.Fatal(err)
}
// Remove format.json of all disks
obj, _, err = initObjectLayer(endpoints)
obj, _, err = initObjectLayer(mustGetNewEndpointList(fsDirs...))
if err != nil {
t.Fatal(err)
}

View File

@@ -20,6 +20,7 @@ import (
"io"
"os"
pathutil "path"
"runtime"
)
// Removes only the file at given path does not remove
@@ -132,7 +133,7 @@ func fsStatDir(statDir string) (os.FileInfo, error) {
return nil, traceError(err)
}
fi, err := os.Stat(preparePath(statDir))
fi, err := osStat(preparePath(statDir))
if err != nil {
if os.IsNotExist(err) {
return nil, traceError(errVolumeNotFound)
@@ -159,7 +160,7 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
return nil, traceError(err)
}
fi, err := os.Stat(preparePath(statFile))
fi, err := osStat(preparePath(statFile))
if err != nil {
if os.IsNotExist(err) {
return nil, traceError(errFileNotFound)
@@ -205,7 +206,7 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
}
// Stat to get the size of the file at path.
st, err := fr.Stat()
st, err := osStat(preparePath(readPath))
if err != nil {
return nil, 0, traceError(err)
}
@@ -343,7 +344,7 @@ func fsDeleteFile(basePath, deletePath string) error {
}
// Verify if the path exists.
pathSt, err := os.Stat(preparePath(deletePath))
pathSt, err := osStat(preparePath(deletePath))
if err != nil {
if os.IsNotExist(err) {
return traceError(errFileNotFound)
@@ -377,3 +378,49 @@ func fsDeleteFile(basePath, deletePath string) error {
return nil
}
// fsRemoveMeta safely removes a locked file and takes care of Windows special case
func fsRemoveMeta(basePath, deletePath, tmpDir string) error {
// Special case for windows please read through.
if runtime.GOOS == globalWindowsOSName {
// Ordinarily windows does not permit deletion or renaming of files still
// in use, but if all open handles to that file were opened with FILE_SHARE_DELETE
// then it can permit renames and deletions of open files.
//
// There are however some gotchas with this, and it is worth listing them here.
// Firstly, Windows never allows you to really delete an open file, rather it is
// flagged as delete pending and its entry in its directory remains visible
// (though no new file handles may be opened to it) and when the very last
// open handle to the file in the system is closed, only then is it truly
// deleted. Well, actually only sort of truly deleted, because Windows only
// appears to remove the file entry from the directory, but in fact that
// entry is merely hidden and actually still exists and attempting to create
// a file with the same name will return an access denied error. How long it
// silently exists for depends on a range of factors, but put it this way:
// if your code loops creating and deleting the same file name as you might
// when operating a lock file, you're going to see lots of random spurious
// access denied errors and truly dismal lock file performance compared to POSIX.
//
// We work around these un-POSIX file semantics by taking a dual step to
// delete files. First, the file is renamed to a tmp location. We always
// open files with FILE_SHARE_DELETE permission enabled, and with that
// flag Windows permits renaming and deletion, and because the name was changed
// to a very random name somewhere not in its origin directory before deletion,
// you don't see those unexpected random errors when creating files with the
// same name as a recently deleted file as you do anywhere else on Windows.
// Because the file is probably not in its original containing directory any more,
// deletions of that directory will not fail with "directory not empty" as they
// otherwise normally would either.
tmpPath := pathJoin(tmpDir, mustGetUUID())
fsRenameFile(deletePath, tmpPath)
// Proceed to deleting the directory if empty
fsDeleteFile(basePath, pathutil.Dir(deletePath))
// Finally delete the renamed file.
return fsDeleteFile(tmpDir, tmpPath)
}
return fsDeleteFile(basePath, deletePath)
}
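A usage sketch with placeholder variables (fsPath, fsUUID, bucketMetaDir, and fsMetaPath are illustrative, not from this hunk): callers hand fsRemoveMeta the base path, the metadata file to delete, and a tmp directory used for the Windows rename-then-delete workaround described above.
	tmpDir := pathJoin(fsPath, minioMetaTmpBucket, fsUUID)
	if err := fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir); err != nil {
		return err
	}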

View File

@@ -18,7 +18,12 @@ package cmd
import (
"bytes"
"io/ioutil"
"os"
"path"
"testing"
"github.com/minio/minio/pkg/lock"
)
func TestFSStats(t *testing.T) {
@@ -396,3 +401,52 @@ func TestFSRemoves(t *testing.T) {
t.Fatal(err)
}
}
func TestFSRemoveMeta(t *testing.T) {
// create posix test setup
_, fsPath, err := newPosixTestSetup()
if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err)
}
defer removeAll(fsPath)
// Setup test environment.
if err = fsMkdir(pathJoin(fsPath, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
filePath := pathJoin(fsPath, "success-vol", "success-file")
var buf = make([]byte, 4096)
var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(filePath, reader, buf, reader.Size()); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
rwPool := &fsIOPool{
readersMap: make(map[string]*lock.RLockedFile),
}
if _, err := rwPool.Open(filePath); err != nil {
t.Fatalf("Unable to lock file %s", filePath)
}
defer rwPool.Close(filePath)
tmpDir, tmpErr := ioutil.TempDir(globalTestTmpDir, "minio-")
if tmpErr != nil {
t.Fatal(tmpErr)
}
if err := fsRemoveMeta(fsPath, filePath, tmpDir); err != nil {
t.Fatalf("Unable to remove file, %s", err)
}
if _, err := osStat(preparePath(filePath)); !os.IsNotExist(err) {
t.Fatalf("`%s` file found though it should have been deleted.", filePath)
}
if _, err := osStat(preparePath(path.Dir(filePath))); !os.IsNotExist(err) {
t.Fatalf("`%s` parent directory found though it should have been deleted.", filePath)
}
}

View File

@@ -27,6 +27,7 @@ import (
"github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/mimedb"
"github.com/tidwall/gjson"
)
const (
@@ -66,7 +67,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
Name: object,
}
// We set file into only if its valid.
// We set file info only if it's valid.
objInfo.ModTime = timeSentinel
if fi != nil {
objInfo.ModTime = fi.ModTime()
@@ -144,29 +145,76 @@ func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
return int64(len(metadataBytes)), nil
}
func parseFSVersion(fsMetaBuf []byte) string {
return gjson.GetBytes(fsMetaBuf, "version").String()
}
func parseFSFormat(fsMetaBuf []byte) string {
return gjson.GetBytes(fsMetaBuf, "format").String()
}
func parseFSRelease(fsMetaBuf []byte) string {
return gjson.GetBytes(fsMetaBuf, "minio.release").String()
}
func parseFSMetaMap(fsMetaBuf []byte) map[string]string {
// Get xlMetaV1.Meta map.
metaMapResult := gjson.GetBytes(fsMetaBuf, "meta").Map()
metaMap := make(map[string]string)
for key, valResult := range metaMapResult {
metaMap[key] = valResult.String()
}
return metaMap
}
func parseFSParts(fsMetaBuf []byte) []objectPartInfo {
// Parse the FS Parts.
partsResult := gjson.GetBytes(fsMetaBuf, "parts").Array()
partInfo := make([]objectPartInfo, len(partsResult))
for i, p := range partsResult {
info := objectPartInfo{}
info.Number = int(p.Get("number").Int())
info.Name = p.Get("name").String()
info.ETag = p.Get("etag").String()
info.Size = p.Get("size").Int()
partInfo[i] = info
}
return partInfo
}
func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var metadataBytes []byte
var fsMetaBuf []byte
fi, err := lk.Stat()
if err != nil {
return 0, traceError(err)
}
metadataBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, traceError(err)
}
if len(metadataBytes) == 0 {
if len(fsMetaBuf) == 0 {
return 0, traceError(io.EOF)
}
// Decode `fs.json` into fsMeta structure.
if err = json.Unmarshal(metadataBytes, m); err != nil {
return 0, traceError(err)
}
// obtain version.
m.Version = parseFSVersion(fsMetaBuf)
// obtain format.
m.Format = parseFSFormat(fsMetaBuf)
// obtain metadata.
m.Meta = parseFSMetaMap(fsMetaBuf)
// obtain parts info list.
m.Parts = parseFSParts(fsMetaBuf)
// obtain minio release date.
m.Minio.Release = parseFSRelease(fsMetaBuf)
// Success.
return int64(len(metadataBytes)), nil
return int64(len(fsMetaBuf)), nil
}
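To make the gjson-based parsing concrete, a sketch with an invented fs.json payload and what each helper above would extract from it:
	fsMetaBuf := []byte(`{
		"version": "1.0.0",
		"format": "fs",
		"minio": {"release": "RELEASE.2017-05-04T00-00-00Z"},
		"meta": {"md5Sum": "d41d8cd98f00b204e9800998ecf8427e"},
		"parts": [{"number": 1, "name": "part.1", "etag": "abc123", "size": 1024}]
	}`)
	parseFSVersion(fsMetaBuf) // "1.0.0"
	parseFSFormat(fsMetaBuf)  // "fs"
	parseFSMetaMap(fsMetaBuf) // map[md5Sum:d41d8cd98f00b204e9800998ecf8427e]
	parseFSParts(fsMetaBuf)   // one objectPartInfo: Number=1, Name="part.1", Size=1024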
// FS metadata constants.

View File

@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"os"
"path/filepath"
"testing"
)
@@ -73,18 +72,6 @@ func TestReadFSMetadata(t *testing.T) {
if _, err = fsMeta.ReadFrom(rlk.LockedFile); err != nil {
t.Fatal("Unexpected error ", err)
}
// Corrupted fs.json
file, err := os.OpenFile(preparePath(fsPath), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
t.Fatal("Unexpected error ", err)
}
file.Write([]byte{'a'})
file.Close()
fsMeta = fsMetaV1{}
if _, err := fsMeta.ReadFrom(rlk.LockedFile); err == nil {
t.Fatal("Should fail", err)
}
}
// TestWriteFSMetadata - tests of writeFSMetadata with healthy disk.

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +24,6 @@ import (
"io"
"os"
pathutil "path"
"runtime"
"strings"
"time"
@@ -46,56 +45,15 @@ func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
return true
}
// Delete uploads.json file wrapper handling a tricky case on windows.
// Delete uploads.json file wrapper
func (fs fsObjects) deleteUploadsJSON(bucket, object, uploadID string) error {
timeID := fmt.Sprintf("%X", time.Now().UTC().UnixNano())
tmpPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"+"+timeID)
multipartBucketPath := pathJoin(fs.fsPath, minioMetaMultipartBucket)
uploadPath := pathJoin(multipartBucketPath, bucket, object)
uploadsMetaPath := pathJoin(uploadPath, uploadsJSONFile)
// Special case for windows please read through.
if runtime.GOOS == globalWindowsOSName {
// Ordinarily windows does not permit deletion or renaming of files still
// in use, but if all open handles to that file were opened with FILE_SHARE_DELETE
// then it can permit renames and deletions of open files.
//
// There are however some gotchas with this, and it is worth listing them here.
// Firstly, Windows never allows you to really delete an open file, rather it is
// flagged as delete pending and its entry in its directory remains visible
// (though no new file handles may be opened to it) and when the very last
// open handle to the file in the system is closed, only then is it truly
// deleted. Well, actually only sort of truly deleted, because Windows only
// appears to remove the file entry from the directory, but in fact that
// entry is merely hidden and actually still exists and attempting to create
// a file with the same name will return an access denied error. How long it
// silently exists for depends on a range of factors, but put it this way:
// if your code loops creating and deleting the same file name as you might
// when operating a lock file, you're going to see lots of random spurious
// access denied errors and truly dismal lock file performance compared to POSIX.
//
// We work-around these un-POSIX file semantics by taking a dual step to
// deleting files. Firstly, it renames the file to tmp location into multipartTmpBucket
// We always open files with FILE_SHARE_DELETE permission enabled, with that
// flag Windows permits renaming and deletion, and because the name was changed
// to a very random name somewhere not in its origin directory before deletion,
// you don't see those unexpected random errors when creating files with the
// same name as a recently deleted file as you do anywhere else on Windows.
// Because the file is probably not in its original containing directory any more,
// deletions of that directory will not fail with "directory not empty" as they
// otherwise normally would either.
fsRenameFile(uploadsMetaPath, tmpPath)
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
// Proceed to deleting the directory.
if err := fsDeleteFile(multipartBucketPath, uploadPath); err != nil {
return err
}
// Finally delete the renamed file.
return fsDeleteFile(pathutil.Dir(tmpPath), tmpPath)
}
return fsDeleteFile(multipartBucketPath, uploadsMetaPath)
return fsRemoveMeta(multipartBucketPath, uploadsMetaPath, tmpDir)
}
// Removes the uploadID, called either by CompleteMultipart or AbortMultipart. If the resulting uploads
@@ -380,7 +338,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
fsMeta.Meta = meta
uploadID = mustGetUUID()
initiated := time.Now().UTC()
initiated := UTCNow()
// Add upload ID to uploads.json
uploadsPath := pathJoin(bucket, object, uploadsJSONFile)

View File

@@ -22,6 +22,7 @@ import (
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -84,7 +85,7 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
return nil, err
}
fi, err := os.Stat(preparePath(fsPath))
fi, err := osStat(preparePath(fsPath))
if err == nil {
if !fi.IsDir() {
return nil, syscall.ENOTDIR
@@ -223,7 +224,7 @@ func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
return BucketInfo{}, toObjectErr(err, bucket)
}
// As os.Stat() carries nothing other than ModTime(), use ModTime() as CreatedTime.
// As osStat() carries nothing other than ModTime(), use ModTime() as CreatedTime.
createdTime := st.ModTime()
return BucketInfo{
Name: bucket,
@@ -262,7 +263,7 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
bucketInfos = append(bucketInfos, BucketInfo{
Name: fi.Name(),
// As os.Stat() doesn't carry CreatedTime, use ModTime() as CreatedTime.
// As osStat() doesn't carry CreatedTime, use ModTime() as CreatedTime.
Created: fi.ModTime(),
})
}
@@ -491,7 +492,9 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
// until EOF, writes data directly to configured filesystem path.
// Additionally writes `fs.json` which carries the necessary metadata
// for future object operations.
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, retErr error) {
var err error
// This is a special case where size is '0' and the object ends with
// a slash separator; we treat it as a valid operation and
// return success.
@@ -516,13 +519,21 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
var wlk *lock.LockedFile
if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
defer func() {
// Remove meta file when PutObject encounters any error
if retErr != nil {
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir)
}
}()
}
// Uploaded object will first be written to the temporary location which will eventually
@@ -687,6 +698,41 @@ func (fs fsObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
return listDir
}
// getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk.
func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fsMetaJSONFile)
// Read `fs.json` to perhaps contend with
// parallel Put() operations.
rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return "", toObjectErr(traceError(err), bucket, entry)
}
// If file is not found, we don't need to proceed forward.
if err == errFileNotFound {
return "", nil
}
// Read from fs metadata only if it exists.
defer fs.rwPool.Close(fsMetaPath)
fsMetaBuf, err := ioutil.ReadAll(rlk.LockedFile)
if err != nil {
// `fs.json` can be empty due to a previously failed
// PutObject() transaction; if we arrive at such
// a situation we just ignore and continue.
if errorCause(err) != io.EOF {
return "", toObjectErr(err, bucket, entry)
}
}
fsMetaMap := parseFSMetaMap(fsMetaBuf)
return fsMetaMap["md5Sum"], nil
}
// ListObjects - lists all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests.
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
@@ -731,14 +777,33 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
objInfo.IsDir = true
return
}
// Protect reading `fs.json`.
objectLock := globalNSMutex.NewNSLock(bucket, entry)
objectLock.RLock()
var md5Sum string
md5Sum, err = fs.getObjectETag(bucket, entry)
objectLock.RUnlock()
if err != nil {
return ObjectInfo{}, err
}
// Stat the file to get file size.
var fi os.FileInfo
fi, err = fsStatFile(pathJoin(fs.fsPath, bucket, entry))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, entry)
}
fsMeta := fsMetaV1{}
return fsMeta.ToObjectInfo(bucket, entry, fi), nil
// Success.
return ObjectInfo{
Name: entry,
Bucket: bucket,
Size: fi.Size(),
ModTime: fi.ModTime(),
IsDir: fi.IsDir(),
MD5Sum: md5Sum,
}, nil
}
heal := false // true only for xl.ListObjectsHeal()
@@ -809,8 +874,8 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
}
// HealObject - no-op for fs. Valid only for XL.
func (fs fsObjects) HealObject(bucket, object string) error {
return traceError(NotImplemented{})
func (fs fsObjects) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
}
// HealBucket - no-op for fs, Valid only for XL.

View File

@@ -303,7 +303,7 @@ func TestFSHealObject(t *testing.T) {
defer removeAll(disk)
obj := initFSObjects(disk, t)
err := obj.HealObject("bucket", "object")
_, _, err := obj.HealObject("bucket", "object")
if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}

View File

@@ -60,8 +60,8 @@ func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, e
// AnonGetObject - SendGET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
url := a.client.GetBlobURL(bucket, object)
req, err := http.NewRequest("GET", url, nil)
u := a.client.GetBlobURL(bucket, object)
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}

View File

@@ -27,8 +27,8 @@ func (a AzureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
}
// HealObject - Not relevant.
func (a AzureObjects) HealObject(bucket, object string) error {
return traceError(NotImplemented{})
func (a AzureObjects) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
}
// ListObjectsHeal - Not relevant.

View File

@@ -154,6 +154,12 @@ func (a AzureObjects) StorageInfo() StorageInfo {
// MakeBucket - Create a new container on azure backend.
func (a AzureObjects) MakeBucket(bucket string) error {
// will never be called; present only to satisfy the ObjectLayer interface
return traceError(NotImplemented{})
}
// MakeBucketWithLocation - Create a new container on azure backend.
func (a AzureObjects) MakeBucketWithLocation(bucket, location string) error {
err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
return azureToObjectError(traceError(err), bucket)
}
@@ -366,7 +372,7 @@ func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMa
// NewMultipartUpload.
return result, nil
}
result.Uploads = []uploadMetadata{{prefix, prefix, time.Now().UTC(), "", nil}}
result.Uploads = []uploadMetadata{{prefix, prefix, UTCNow(), "", nil}}
return result, nil
}
@@ -441,7 +447,7 @@ func (a AzureObjects) PutObjectPart(bucket, object, uploadID string, partID int,
info.PartNumber = partID
info.ETag = md5Hex
info.LastModified = time.Now().UTC()
info.LastModified = UTCNow()
info.Size = size
return info, nil
}
@@ -478,7 +484,7 @@ func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumbe
}
result.Parts = append(result.Parts, PartInfo{
partID,
time.Now().UTC(),
UTCNow(),
md5Hex,
part.Size,
})
@@ -592,7 +598,15 @@ func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a AzureObjects) SetBucketPolicies(bucket string, policies []BucketAccessPolicy) error {
func (a AzureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
var policies []BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return traceError(NotImplemented{})
@@ -612,18 +626,21 @@ func (a AzureObjects) SetBucketPolicies(bucket string, policies []BucketAccessPo
}
// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a AzureObjects) GetBucketPolicies(bucket string) ([]BucketAccessPolicy, error) {
func (a AzureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
perm, err := a.client.GetContainerPermissions(bucket, 0, "")
if err != nil {
return nil, azureToObjectError(traceError(err), bucket)
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(err), bucket)
}
switch perm.AccessType {
case storage.ContainerAccessTypePrivate:
return nil, nil
// Do nothing
case storage.ContainerAccessTypeContainer:
return []BucketAccessPolicy{{"", policy.BucketPolicyReadOnly}}, nil
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
default:
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(NotImplemented{}))
}
return nil, azureToObjectError(traceError(NotImplemented{}))
return policyInfo, nil
}
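Because read-only and none are the only policies Azure containers can express, here is a round-trip sketch of the new policy.BucketAccessPolicy plumbing (bucket name invented):
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	policyInfo.Statements = policy.SetPolicy(policyInfo.Statements,
		policy.BucketPolicyReadOnly, "mybucket", "")
	// Anything other than a single read-only prefix policy fails with
	// NotImplemented in SetBucketPolicies above.
	if err := a.SetBucketPolicies("mybucket", policyInfo); err != nil {
		return err
	}
	// GetBucketPolicies("mybucket") now reports the same read-only statement.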
// DeleteBucketPolicies - Set the container ACL to "private"

View File

@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"io"
"io/ioutil"
"net/http"
@@ -354,37 +353,13 @@ func (api gatewayAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *h
return
}
{
// FIXME: consolidate bucketPolicy and policy.BucketAccessPolicy so that
// the verification below is done on the same type.
// Parse bucket policy.
policyInfo := &bucketPolicy{}
err = parseBucketPolicy(bytes.NewReader(policyBytes), policyInfo)
if err != nil {
errorIf(err, "Unable to parse bucket policy.")
writeErrorResponse(w, ErrInvalidPolicyDocument, r.URL)
return
}
// Parse check bucket policy.
if s3Error := checkBucketPolicyResources(bucket, policyInfo); s3Error != ErrNone {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
policyInfo := &policy.BucketAccessPolicy{}
if err = json.Unmarshal(policyBytes, policyInfo); err != nil {
policyInfo := policy.BucketAccessPolicy{}
if err = json.Unmarshal(policyBytes, &policyInfo); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
var policies []BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})
}
if err = objAPI.SetBucketPolicies(bucket, policies); err != nil {
if err = objAPI.SetBucketPolicies(bucket, policyInfo); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@@ -453,17 +428,14 @@ func (api gatewayAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *h
return
}
policies, err := objAPI.GetBucketPolicies(bucket)
bp, err := objAPI.GetBucketPolicies(bucket)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, p := range policies {
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, p.Policy, bucket, p.Prefix)
}
policyBytes, err := json.Marshal(&policyInfo)
policyBytes, err := json.Marshal(bp)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -499,6 +471,65 @@ func (api gatewayAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWri
writeErrorResponse(w, ErrNotImplemented, r.URL)
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api gatewayAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// PutBucket does not have any bucket action.
s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
// Clients like boto3 send the putBucket() call signed with the configured region.
s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion())
}
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
vars := router.Vars(r)
bucket := vars["bucket"]
// Validate if incoming location constraint is valid, reject
// requests which do not follow valid region requirements.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// The region is validated here rather than in
// isValidLocationConstraint, because that function reads the
// request body, which has already been consumed.
serverRegion := serverConfig.GetRegion()
if serverRegion != location {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(bucket, location)
if err != nil {
errorIf(err, "Unable to create a bucket.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Make sure to add Location information here only for bucket
w.Header().Set("Location", getLocation(r))
writeSuccessResponseHeadersOnly(w)
}
// DeleteBucketHandler - Delete bucket
func (api gatewayAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()

View File

@@ -17,6 +17,7 @@
package cmd
import (
"errors"
"fmt"
"net/url"
"os"
@@ -24,7 +25,6 @@ import (
"github.com/gorilla/mux"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
var gatewayTemplate = `NAME:
@@ -38,6 +38,7 @@ FLAGS:
{{end}}{{end}}
BACKEND:
azure: Microsoft Azure Blob Storage. Default ENDPOINT is https://core.windows.net
s3: Amazon Simple Storage Service (S3). Default ENDPOINT is https://s3.amazonaws.com
ENVIRONMENT VARIABLES:
ACCESS:
@@ -49,6 +50,16 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=azureaccountname
$ export MINIO_SECRET_KEY=azureaccountkey
$ {{.HelpName}} azure
2. Start minio gateway server for AWS S3 backend.
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}} s3
3. Start minio gateway server for S3 backend on custom endpoint.
$ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
$ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
$ {{.HelpName}} s3 https://play.minio.io:9000
`
var gatewayCmd = cli.Command{
@@ -70,6 +81,7 @@ type gatewayBackend string
const (
azureBackend gatewayBackend = "azure"
s3Backend gatewayBackend = "s3"
// Add more backends here.
)
@@ -79,7 +91,7 @@ func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
accessKey = os.Getenv("MINIO_ACCESS_KEY")
secretKey = os.Getenv("MINIO_SECRET_KEY")
if accessKey == "" || secretKey == "" {
console.Fatalln("Access and secret keys are mandatory to run Minio gateway server.")
fatalIf(errors.New("Missing credentials"), "Access and secret keys are mandatory to run Minio gateway server.")
}
return accessKey, secretKey
}
@@ -89,11 +101,15 @@ func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
//
// - Azure Blob Storage.
// - Add your favorite backend here.
func newGatewayLayer(backendType, endPoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
if gatewayBackend(backendType) != azureBackend {
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
func newGatewayLayer(backendType, endpoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
switch gatewayBackend(backendType) {
case azureBackend:
return newAzureLayer(endpoint, accessKey, secretKey, secure)
case s3Backend:
return newS3Gateway(endpoint, accessKey, secretKey, secure)
}
return newAzureLayer(endPoint, accessKey, secretKey, secure)
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
}
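The switch keeps backend registration a one-case change, matching the "Add more backends here." note above. A sketch of what a hypothetical future backend would add (names invented):
	// const gcsBackend gatewayBackend = "gcs" // alongside azureBackend, s3Backend
	// case gcsBackend:
	//	return newGCSGateway(endpoint, accessKey, secretKey, secure)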
// Initialize a new gateway config.
@@ -102,7 +118,7 @@ func newGatewayLayer(backendType, endPoint, accessKey, secretKey string, secure
// only used in memory.
func newGatewayConfig(accessKey, secretKey, region string) error {
// Initialize server config.
srvCfg := newServerConfigV14()
srvCfg := newServerConfigV18()
// If env is set for a fresh start, save them to config file.
srvCfg.SetCredential(credential{
@@ -110,17 +126,9 @@ func newGatewayConfig(accessKey, secretKey, region string) error {
SecretKey: secretKey,
})
// Set default printing to console.
srvCfg.Logger.SetConsole(consoleLogger{true, "error"})
// Set custom region.
srvCfg.SetRegion(region)
// Create certs path for SSL configuration.
if err := createConfigDir(); err != nil {
return err
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
@@ -138,6 +146,7 @@ func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error)
// Default connection will be "secure".
arg = "https://" + arg
}
u, err := url.Parse(arg)
if err != nil {
return "", false, err
@@ -167,16 +176,13 @@ func gatewayMain(ctx *cli.Context) {
// TODO: add support for custom region when we add
// support for S3 backend storage, currently this can
// default to "us-east-1"
err := newGatewayConfig(accessKey, secretKey, "us-east-1")
if err != nil {
console.Fatalf("Unable to initialize gateway config. Error: %s", err)
}
// Enable console logging.
enableConsoleLogger()
newGatewayConfig(accessKey, secretKey, globalMinioDefaultRegion)
// Get quiet flag from command line argument.
quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
if quietFlag {
log.EnableQuiet()
}
// First argument is selected backend type.
backendType := ctx.Args().First()
@@ -184,20 +190,13 @@ func gatewayMain(ctx *cli.Context) {
// Second argument is endpoint. If no endpoint is specified then the
// gateway implementation should use a default setting.
endPoint, secure, err := parseGatewayEndpoint(ctx.Args().Get(1))
if err != nil {
console.Fatalf("Unable to parse endpoint. Error: %s", err)
}
fatalIf(err, "Unable to parse endpoint")
// Create certs path for SSL configuration.
err = createConfigDir()
if err != nil {
console.Fatalf("Unable to create configuration directory. Error: %s", err)
}
fatalIf(createConfigDir(), "Unable to create configuration directory")
newObject, err := newGatewayLayer(backendType, endPoint, accessKey, secretKey, secure)
if err != nil {
console.Fatalf("Unable to initialize gateway layer. Error: %s", err)
}
fatalIf(err, "Unable to initialize gateway layer")
initNSLock(false) // Enable local namespace lock.
@@ -226,8 +225,8 @@ func gatewayMain(ctx *cli.Context) {
apiServer := NewServerMux(ctx.String("address"), registerHandlers(router, handlerFns...))
// Set if we are SSL enabled S3 gateway.
globalIsSSL = isSSL()
_, _, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL key file")
// Start server, automatically configures TLS if certs are available.
go func() {
@@ -235,13 +234,10 @@ func gatewayMain(ctx *cli.Context) {
if globalIsSSL {
cert, key = getPublicCertFile(), getPrivateKeyFile()
}
if aerr := apiServer.ListenAndServe(cert, key); aerr != nil {
console.Fatalf("Failed to start minio server. Error: %s\n", aerr)
}
}()
apiEndPoints, err := finalizeAPIEndpoints(apiServer.Addr)
fatalIf(err, "Unable to finalize API endpoints for %s", apiServer.Addr)
aerr := apiServer.ListenAndServe(cert, key)
fatalIf(aerr, "Failed to start minio server")
}()
// Once endpoints are finalized, initialize the new object api.
globalObjLayerMutex.Lock()
@@ -253,9 +249,12 @@ func gatewayMain(ctx *cli.Context) {
mode := ""
if gatewayBackend(backendType) == azureBackend {
mode = globalMinioModeGatewayAzure
} else if gatewayBackend(backendType) == s3Backend {
mode = globalMinioModeGatewayS3
}
checkUpdate(mode)
printGatewayStartupMessage(apiEndPoints, accessKey, secretKey, backendType)
apiEndpoints := getAPIEndpoints(apiServer.Addr)
printGatewayStartupMessage(apiEndpoints, accessKey, secretKey, backendType)
}
<-globalServiceDoneCh

View File

@@ -20,15 +20,19 @@ import (
"io"
router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
)
// GatewayLayer - Interface to implement gateway mode.
type GatewayLayer interface {
ObjectLayer
MakeBucketWithLocation(bucket, location string) error
AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
SetBucketPolicies(string, []BucketAccessPolicy) error
GetBucketPolicies(string) ([]BucketAccessPolicy, error)
SetBucketPolicies(string, policy.BucketAccessPolicy) error
GetBucketPolicies(string) (policy.BucketAccessPolicy, error)
DeleteBucketPolicies(string) error
AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)

View File

@@ -0,0 +1,91 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
minio "github.com/minio/minio-go"
)
// AnonGetObject - Get object anonymously
func (l *s3Gateway) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
r := minio.NewGetReqHeaders()
if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
object, _, err := l.anonClient.GetObject(bucket, key, r)
if err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
defer object.Close()
if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
return nil
}
// AnonGetObjectInfo - Get object info anonymously
func (l *s3Gateway) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
r := minio.NewHeadReqHeaders()
oi, err := l.anonClient.StatObject(bucket, object, r)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// AnonListObjects - List objects anonymously
func (l *s3Gateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Gateway) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
} else if !exists {
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
buckets, err := l.anonClient.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
if bi.Name != bucket {
continue
}
return BucketInfo{
Name: bi.Name,
Created: bi.CreationDate,
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}

View File

@@ -0,0 +1,42 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// HealBucket - Not relevant.
func (l *s3Gateway) HealBucket(bucket string) error {
return traceError(NotImplemented{})
}
// ListBucketsHeal - Not relevant.
func (l *s3Gateway) ListBucketsHeal() (buckets []BucketInfo, err error) {
return []BucketInfo{}, traceError(NotImplemented{})
}
// HealObject - Not relevant.
func (l *s3Gateway) HealObject(bucket string, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
}
// ListObjectsHeal - Not relevant.
func (l *s3Gateway) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (l *s3Gateway) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
}

570
cmd/gateway-s3.go Normal file
View File

@@ -0,0 +1,570 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
"net/http"
"path"
"encoding/hex"
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
)
// s3ToObjectError converts Minio errors to minio object layer errors.
func s3ToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
e, ok := err.(*Error)
if !ok {
// Code should be fixed if this function is called without doing traceError();
// otherwise handling the different situations here makes this function complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
minioErr, ok := err.(minio.ErrorResponse)
if !ok {
// We don't interpret non-Minio errors, as Minio errors
// carry a StatusCode that helps convert them to object errors.
return e
}
switch minioErr.Code {
case "BucketAlreadyOwnedByYou":
err = BucketAlreadyOwnedByYou{}
case "BucketNotEmpty":
err = BucketNotEmpty{}
case "NoSuchBucketPolicy":
err = PolicyNotFound{}
case "InvalidBucketName":
err = BucketNameInvalid{Bucket: bucket}
case "NoSuchBucket":
err = BucketNotFound{Bucket: bucket}
case "NoSuchKey":
if object != "" {
err = ObjectNotFound{Bucket: bucket, Object: object}
} else {
err = BucketNotFound{Bucket: bucket}
}
case "XMinioInvalidObjectName":
err = ObjectNameInvalid{}
case "AccessDenied":
err = PrefixAccessDenied{
Bucket: bucket,
Object: object,
}
case "XAmzContentSHA256Mismatch":
err = SHA256Mismatch{}
}
e.e = err
return e
}
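For orientation, a sketch of how an error flows through this mapping. traceError is the cmd package's error-wrapping helper shown in this diff; errorCause is assumed here to be its unwrapping counterpart (not shown in this diff):

// Sketch: a minio-go "NoSuchBucket" response surfaces as BucketNotFound.
func exampleS3ErrorMapping() error {
    err := minio.ErrorResponse{Code: "NoSuchBucket"}
    mapped := s3ToObjectError(traceError(err), "mybucket")
    // Unwrapping mapped (e.g. via errorCause) would yield
    // BucketNotFound{Bucket: "mybucket"}.
    return mapped
}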
// s3Gateway implements gateway for Minio and S3 compatible object storage servers.
type s3Gateway struct {
Client *minio.Core
anonClient *minio.Core
}
// newS3Gateway returns an s3 gateway layer.
func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
if endpoint == "" {
endpoint = "s3.amazonaws.com"
secure = true
}
// Initialize minio client object.
client, err := minio.NewCore(endpoint, accessKey, secretKey, secure)
if err != nil {
return nil, err
}
anonClient, err := minio.NewCore(endpoint, "", "", secure)
if err != nil {
return nil, err
}
return &s3Gateway{
Client: client,
anonClient: anonClient,
}, nil
}
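A minimal usage sketch, reusing the public play endpoint and credentials from the help text earlier purely for illustration; note the endpoint is passed in host:port form, since parseGatewayEndpoint normally strips the scheme before this constructor is called:

// Sketch: constructing the S3 gateway layer directly.
func exampleNewS3Gateway() (GatewayLayer, error) {
    return newS3Gateway("play.minio.io:9000",
        "Q3AM3UQ867SPQQA43P2F",
        "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
        true) // secure: play is served over TLS
}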
// Shutdown saves any gateway metadata to disk
// if necessary and reloads it upon next restart.
func (l *s3Gateway) Shutdown() error {
// TODO
return nil
}
// StorageInfo is not relevant to S3 backend.
func (l *s3Gateway) StorageInfo() StorageInfo {
return StorageInfo{}
}
// MakeBucket creates a new container on S3 backend.
func (l *s3Gateway) MakeBucket(bucket string) error {
// will never be called, only satisfy ObjectLayer interface
return traceError(NotImplemented{})
}
// MakeBucketWithLocation creates a new container on S3 backend at the requested location.
func (l *s3Gateway) MakeBucketWithLocation(bucket, location string) error {
err := l.Client.MakeBucket(bucket, location)
if err != nil {
return s3ToObjectError(traceError(err), bucket)
}
return err
}
// GetBucketInfo gets bucket metadata.
func (l *s3Gateway) GetBucketInfo(bucket string) (BucketInfo, error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
if bi.Name != bucket {
continue
}
return BucketInfo{
Name: bi.Name,
Created: bi.CreationDate,
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets
func (l *s3Gateway) ListBuckets() ([]BucketInfo, error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return nil, err
}
b := make([]BucketInfo, len(buckets))
for i, bi := range buckets {
b[i] = BucketInfo{
Name: bi.Name,
Created: bi.CreationDate,
}
}
return b, err
}
// DeleteBucket deletes a bucket on S3
func (l *s3Gateway) DeleteBucket(bucket string) error {
err := l.Client.RemoveBucket(bucket)
if err != nil {
return s3ToObjectError(traceError(err), bucket)
}
return nil
}
// ListObjects lists all blobs in S3 bucket filtered by prefix
func (l *s3Gateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Gateway) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
}
// fromMinioClientListBucketV2Result converts minio ListBucketResult to ListObjectsInfo
func fromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info {
objects := make([]ObjectInfo, len(result.Contents))
for i, oi := range result.Contents {
objects[i] = fromMinioClientObjectInfo(bucket, oi)
}
prefixes := make([]string, len(result.CommonPrefixes))
for i, p := range result.CommonPrefixes {
prefixes[i] = p.Prefix
}
return ListObjectsV2Info{
IsTruncated: result.IsTruncated,
Prefixes: prefixes,
Objects: objects,
ContinuationToken: result.ContinuationToken,
NextContinuationToken: result.NextContinuationToken,
}
}
// fromMinioClientListBucketResult converts minio ListBucketResult to ListObjectsInfo
func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo {
objects := make([]ObjectInfo, len(result.Contents))
for i, oi := range result.Contents {
objects[i] = fromMinioClientObjectInfo(bucket, oi)
}
prefixes := make([]string, len(result.CommonPrefixes))
for i, p := range result.CommonPrefixes {
prefixes[i] = p.Prefix
}
return ListObjectsInfo{
IsTruncated: result.IsTruncated,
NextMarker: result.NextMarker,
Prefixes: prefixes,
Objects: objects,
}
}
// GetObject reads an object from S3. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *s3Gateway) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
r := minio.NewGetReqHeaders()
if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
object, _, err := l.Client.GetObject(bucket, key, r)
if err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
defer object.Close()
if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
return nil
}
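Since GetObject expects an explicit length, a full-object read first has to learn the size. A sketch, assuming `l` is an initialized *s3Gateway:

// Sketch: reading an entire object through the gateway layer.
func exampleReadWholeObject(l *s3Gateway, w io.Writer) error {
    oi, err := l.GetObjectInfo("bucket", "object")
    if err != nil {
        return err
    }
    // Offset 0 with the full size maps to the range 0..size-1 above.
    return l.GetObject("bucket", "object", 0, oi.Size, w)
}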
// fromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo
func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
userDefined := fromMinioClientMetadata(oi.Metadata)
userDefined["Content-Type"] = oi.ContentType
return ObjectInfo{
Bucket: bucket,
Name: oi.Key,
ModTime: oi.LastModified,
Size: oi.Size,
MD5Sum: oi.ETag,
UserDefined: userDefined,
ContentType: oi.ContentType,
ContentEncoding: oi.Metadata.Get("Content-Encoding"),
}
}
// GetObjectInfo reads object info and replies back ObjectInfo
func (l *s3Gateway) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
r := minio.NewHeadReqHeaders()
oi, err := l.Client.StatObject(bucket, object, r)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// PutObject creates a new object with the incoming data.
func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
var sha256sumBytes []byte
var err error
if sha256sum != "" {
sha256sumBytes, err = hex.DecodeString(sha256sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
}
var md5sumBytes []byte
md5sum := metadata["md5Sum"]
if md5sum != "" {
md5sumBytes, err = hex.DecodeString(md5sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "md5Sum")
}
oi, err := l.Client.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// CopyObject copies a blob from source container to destination container.
func (l *s3Gateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject)
}
oi, err := l.GetObjectInfo(destBucket, destObject)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), destBucket, destObject)
}
return oi, nil
}
// DeleteObject deletes a blob in bucket
func (l *s3Gateway) DeleteObject(bucket string, object string) error {
err := l.Client.RemoveObject(bucket, object)
if err != nil {
return s3ToObjectError(traceError(err), bucket, object)
}
return nil
}
// fromMinioClientUploadMetadata converts ObjectMultipartInfo to uploadMetadata
func fromMinioClientUploadMetadata(omi minio.ObjectMultipartInfo) uploadMetadata {
return uploadMetadata{
Object: omi.Key,
UploadID: omi.UploadID,
Initiated: omi.Initiated,
}
}
// fromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo
func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo {
uploads := make([]uploadMetadata, len(lmur.Uploads))
for i, um := range lmur.Uploads {
uploads[i] = fromMinioClientUploadMetadata(um)
}
commonPrefixes := make([]string, len(lmur.CommonPrefixes))
for i, cp := range lmur.CommonPrefixes {
commonPrefixes[i] = cp.Prefix
}
return ListMultipartsInfo{
KeyMarker: lmur.KeyMarker,
UploadIDMarker: lmur.UploadIDMarker,
NextKeyMarker: lmur.NextKeyMarker,
NextUploadIDMarker: lmur.NextUploadIDMarker,
MaxUploads: int(lmur.MaxUploads),
IsTruncated: lmur.IsTruncated,
Uploads: uploads,
Prefix: lmur.Prefix,
Delimiter: lmur.Delimiter,
CommonPrefixes: commonPrefixes,
EncodingType: lmur.EncodingType,
}
}
// ListMultipartUploads lists all multipart uploads.
func (l *s3Gateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
}
return fromMinioClientListMultipartsInfo(result), nil
}
// fromMinioClientMetadata converts minio metadata to map[string]string
func fromMinioClientMetadata(metadata map[string][]string) map[string]string {
mm := map[string]string{}
for k, v := range metadata {
mm[http.CanonicalHeaderKey(k)] = v[0]
}
return mm
}
// toMinioClientMetadata converts metadata to map[string][]string
func toMinioClientMetadata(metadata map[string]string) map[string][]string {
mm := map[string][]string{}
for k, v := range metadata {
mm[http.CanonicalHeaderKey(k)] = []string{v}
}
return mm
}
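The two helpers canonicalize header keys on the way through; for example:

// Sketch: round-tripping metadata through the converters above.
func exampleMetadataRoundTrip() {
    meta := toMinioClientMetadata(map[string]string{"content-encoding": "gzip"})
    // meta["Content-Encoding"] == []string{"gzip"} after canonicalization.
    back := fromMinioClientMetadata(meta)
    _ = back // back["Content-Encoding"] == "gzip"; only the first value per key survives.
}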
// NewMultipartUpload initiates a multipart upload for an object.
func (l *s3Gateway) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return l.Client.NewMultipartUpload(bucket, object, toMinioClientMetadata(metadata))
}
// CopyObjectPart copies a part of an object to another bucket and object.
func (l *s3Gateway) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
// FIXME: implement CopyObjectPart
return PartInfo{}, traceError(NotImplemented{})
}
// fromMinioClientObjectPart converts minio ObjectPart to PartInfo
func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
return PartInfo{
Size: op.Size,
ETag: op.ETag,
LastModified: op.LastModified,
PartNumber: op.PartNumber,
}
}
// PutObjectPart puts a part of object in bucket
func (l *s3Gateway) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
md5HexBytes, err := hex.DecodeString(md5Hex)
if err != nil {
return PartInfo{}, err
}
sha256sumBytes, err := hex.DecodeString(sha256sum)
if err != nil {
return PartInfo{}, err
}
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5HexBytes, sha256sumBytes)
if err != nil {
return PartInfo{}, err
}
return fromMinioClientObjectPart(info), nil
}
// fromMinioClientObjectParts converts a list of minio ObjectPart to []PartInfo.
func fromMinioClientObjectParts(parts []minio.ObjectPart) []PartInfo {
toParts := make([]PartInfo, len(parts))
for i, part := range parts {
toParts[i] = fromMinioClientObjectPart(part)
}
return toParts
}
// fromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo
func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo {
return ListPartsInfo{
UploadID: lopr.UploadID,
Bucket: lopr.Bucket,
Object: lopr.Key,
StorageClass: "",
PartNumberMarker: lopr.PartNumberMarker,
NextPartNumberMarker: lopr.NextPartNumberMarker,
MaxParts: lopr.MaxParts,
IsTruncated: lopr.IsTruncated,
EncodingType: lopr.EncodingType,
Parts: fromMinioClientObjectParts(lopr.ObjectParts),
}
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Gateway) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return ListPartsInfo{}, err
}
return fromMinioClientListPartsInfo(result), nil
}
// AbortMultipartUpload aborts an ongoing multipart upload.
func (l *s3Gateway) AbortMultipartUpload(bucket string, object string, uploadID string) error {
return l.Client.AbortMultipartUpload(bucket, object, uploadID)
}
// toMinioClientCompletePart converts completePart to minio CompletePart
func toMinioClientCompletePart(part completePart) minio.CompletePart {
return minio.CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
}
}
// toMinioClientCompleteParts converts []completePart to minio []CompletePart
func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart {
mparts := make([]minio.CompletePart, len(parts))
for i, part := range parts {
mparts[i] = toMinioClientCompletePart(part)
}
return mparts
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Gateway) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return l.GetObjectInfo(bucket, object)
}
// SetBucketPolicies sets policy on bucket
func (l *s3Gateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
return s3ToObjectError(traceError(err), bucket, "")
}
return nil
}
// GetBucketPolicies will get policy on bucket
func (l *s3Gateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
policyInfo, err := l.Client.GetBucketPolicy(bucket)
if err != nil {
return policy.BucketAccessPolicy{}, s3ToObjectError(traceError(err), bucket, "")
}
return policyInfo, nil
}
// DeleteBucketPolicies deletes all policies on bucket
func (l *s3Gateway) DeleteBucketPolicies(bucket string) error {
if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
return s3ToObjectError(traceError(err), bucket, "")
}
return nil
}
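These three calls take minio-go's policy.BucketAccessPolicy directly, matching the GatewayLayer interface change earlier in this commit. A sketch, assuming policy.SetPolicy and policy.BucketPolicyReadOnly from minio-go's pkg/policy behave as in the upstream library:

// Sketch: making objects under "public/" anonymously readable via the gateway.
func exampleSetReadOnlyPolicy(l *s3Gateway) error {
    p := policy.BucketAccessPolicy{Version: "2012-10-17"}
    p.Statements = policy.SetPolicy(p.Statements, policy.BucketPolicyReadOnly,
        "mybucket", "public/")
    return l.SetBucketPolicies("mybucket", p)
}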

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,20 +15,3 @@
*/
package cmd
import "net/url"
type byHostPath []*url.URL
func (s byHostPath) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byHostPath) Len() int {
return len(s)
}
// Note: Host in url.URL includes the port too.
func (s byHostPath) Less(i, j int) bool {
return (s[i].Host + s[i].Path) < (s[j].Host + s[j].Path)
}

View File

@@ -20,8 +20,6 @@ import (
"fmt"
"runtime"
"strings"
"github.com/minio/mc/pkg/console"
)
// Prints the formatted startup message.
@@ -34,13 +32,13 @@ func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey, bac
endPoint := apiEndPoints[0]
// Configure 'mc'; the following block prints platform-specific information for the minio client.
console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
// Prints documentation message.
@@ -49,11 +47,7 @@ func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey, bac
// If SSL is configured, read the certificate chain and print
// its authority and expiry.
if globalIsSSL {
certs, err := readCertificateChain()
if err != nil {
console.Fatalf("Unable to read certificate chain. Error: %s", err)
}
printCertificateMsg(certs)
printCertificateMsg(globalPublicCerts)
}
}
@@ -61,7 +55,7 @@ func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey, bac
func printGatewayCommonMsg(apiEndpoints []string, accessKey, secretKey string) {
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", accessKey)))
console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey)))
log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", accessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey)))
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -238,7 +238,7 @@ func (h timeValidityHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
// or in the future, reject request otherwise.
curTime := time.Now().UTC()
curTime := UTCNow()
if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
writeErrorResponse(w, ErrRequestTimeTooSkewed, r.URL)
return
@@ -274,10 +274,11 @@ var defaultAllowableHTTPMethods = []string{
// setCorsHandler handler for CORS (Cross Origin Resource Sharing)
func setCorsHandler(h http.Handler) http.Handler {
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: defaultAllowableHTTPMethods,
AllowedHeaders: []string{"*"},
ExposedHeaders: []string{"ETag"},
AllowedOrigins: []string{"*"},
AllowedMethods: defaultAllowableHTTPMethods,
AllowedHeaders: []string{"*"},
ExposedHeaders: []string{"ETag"},
AllowCredentials: true,
})
return c.Handler(h)
}
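A sketch of the handler in use; with AllowCredentials now enabled, the wrapped API can be called from browsers that send cookies or auth headers. The handler below is illustrative only:

// Sketch: any HTTP handler can be wrapped with the CORS options above.
func exampleCorsWrap() {
    api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("ETag", `"d41d8cd98f00b204e9800998ecf8427e"`)
    })
    http.Handle("/", setCorsHandler(api))
}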
@@ -401,11 +402,23 @@ func (h httpStatsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Wraps w to record http response information
ww := &httpResponseRecorder{ResponseWriter: w}
// Time start before the call is about to start.
tBefore := UTCNow()
// Execute the request
h.handler.ServeHTTP(ww, r)
// Time after call has completed.
tAfter := UTCNow()
// Time duration in secs since the call started.
//
// We don't need nanosecond precision here,
// simply because it is not human readable.
durationSecs := tAfter.Sub(tBefore).Seconds()
// Update http statistics
globalHTTPStats.updateStats(r, ww)
globalHTTPStats.updateStats(r, ww, durationSecs)
}
// pathValidityHandler validates all the incoming paths for

View File

@@ -18,7 +18,6 @@ package cmd
import (
"crypto/x509"
"net/url"
"runtime"
"time"
@@ -40,6 +39,7 @@ const (
globalMinioModeXL = "mode-server-xl"
globalMinioModeDistXL = "mode-server-distributed-xl"
globalMinioModeGatewayAzure = "mode-gateway-azure"
globalMinioModeGatewayS3 = "mode-gateway-s3"
// Add new global values here.
)
@@ -66,9 +66,15 @@ var (
globalIsBrowserEnabled = true
// This flag is set to 'true' when MINIO_BROWSER env is set.
globalIsEnvBrowser = false
// Set to true if credentials were passed from env, default is false.
globalIsEnvCreds = false
// This flag is set to 'true' when MINIO_REGION env is set.
globalIsEnvRegion = false
// This flag is set to 'us-east-1' by default
globalServerRegion = globalMinioDefaultRegion
// Maximum size of internal objects parts
globalPutPartSize = int64(64 * 1024 * 1024)
@@ -79,9 +85,6 @@ var (
// Holds the host that was passed using --address
globalMinioHost = ""
// Holds the list of API endpoints for a given server.
globalAPIEndpoints = []string{}
// Peer communication struct
globalS3Peers = s3Peers{}
@@ -97,8 +100,7 @@ var (
// Minio server user agent string.
globalServerUserAgent = "Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")"
// url.URL endpoints of disks that belong to the object storage.
globalEndpoints = []*url.URL{}
globalEndpoints EndpointList
// Global server's network statistics
globalConnStats = newConnStats()
@@ -109,6 +111,9 @@ var (
// Time when object layer was initialized on start up.
globalBootTime time.Time
globalActiveCred credential
globalPublicCerts []*x509.Certificate
globalXLObjCacheDisabled bool
// Add new variable global values here.
)

View File

@@ -24,36 +24,29 @@ import (
"strings"
)
// Validates location constraint in PutBucket request body.
// The location value in the request body should match the
// region configured at serverConfig, otherwise error is returned.
func isValidLocationConstraint(r *http.Request) (s3Error APIErrorCode) {
serverRegion := serverConfig.GetRegion()
// Parses location constraint from the incoming reader.
func parseLocationConstraint(r *http.Request) (location string, s3Error APIErrorCode) {
// If the request has no body (content-length set to 0),
// we do not have to validate the location constraint. The bucket will
// be created in the default region.
locationConstraint := createBucketLocationConfiguration{}
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
if err == nil || err == io.EOF {
// Successfully decoded, proceed to verify the region.
// Once region has been obtained we proceed to verify it.
incomingRegion := locationConstraint.Location
if incomingRegion == "" {
// Location constraint is empty for region globalMinioDefaultRegion,
// in accordance with protocol.
incomingRegion = globalMinioDefaultRegion
}
// Return errInvalidRegion if location constraint does not match
// with configured region.
s3Error = ErrNone
if serverRegion != incomingRegion {
s3Error = ErrInvalidRegion
}
return s3Error
if err != nil && err != io.EOF {
errorIf(err, "Unable to xml decode location constraint")
// Treat all other failures as XML parsing errors.
return "", ErrMalformedXML
} // otherwise err is nil or io.EOF, which we treat as success
location = locationConstraint.Location
if location == "" {
location = globalMinioDefaultRegion
}
errorIf(err, "Unable to xml decode location constraint")
// Treat all other failures as XML parsing errors.
return ErrMalformedXML
return location, ErrNone
}
// Validates input location is same as configured region
// of Minio server.
func isValidLocation(location string) bool {
return serverConfig.GetRegion() == location
}
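For reference, the body parseLocationConstraint decodes is the standard S3 CreateBucketConfiguration document. A sketch, with a local struct mirroring (by assumption) the unexported createBucketLocationConfiguration type and encoding/xml imported:

// Sketch: decoding a location constraint body by hand.
func exampleDecodeLocationConstraint() (string, error) {
    body := `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <LocationConstraint>us-west-2</LocationConstraint>
</CreateBucketConfiguration>`
    var lc struct {
        Location string `xml:"LocationConstraint"`
    }
    if err := xml.Unmarshal([]byte(body), &lc); err != nil {
        return "", err
    }
    return lc.Location, nil // "us-west-2"
}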
// Supported headers that needs to be extracted.
@@ -158,6 +151,23 @@ func extractReqParams(r *http.Request) map[string]string {
}
}
// Trims away `aws-chunked` from the content-encoding header if present.
// Streaming signature clients can have custom content-encoding such as
// `aws-chunked,gzip` here we need to only save `gzip`.
// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) {
if contentEnc == "" {
return contentEnc
}
var newEncs []string
for _, enc := range strings.Split(contentEnc, ",") {
if enc != streamingContentEncoding {
newEncs = append(newEncs, enc)
}
}
return strings.Join(newEncs, ",")
}
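For example, shown as a fragment:

trimAwsChunkedContentEncoding("aws-chunked,gzip") // "gzip"
trimAwsChunkedContentEncoding("aws-chunked")      // "" (the only encoding is stripped)
trimAwsChunkedContentEncoding("gzip")             // "gzip" (passes through untouched)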
// extractMetadataFromForm extracts metadata from Post Form.
func extractMetadataFromForm(formValues http.Header) map[string]string {
return extractMetadataFromHeader(formValues)

View File

@@ -39,7 +39,7 @@ func TestIsValidLocationContraint(t *testing.T) {
Body: ioutil.NopCloser(bytes.NewBuffer([]byte("<>"))),
ContentLength: int64(len("<>")),
}
if err := isValidLocationConstraint(malformedReq); err != ErrMalformedXML {
if _, err := parseLocationConstraint(malformedReq); err != ErrMalformedXML {
t.Fatal("Unexpected error: ", err)
}
@@ -68,8 +68,6 @@ func TestIsValidLocationContraint(t *testing.T) {
// Test case - 2.
// In case of empty request body ErrNone is returned.
{"", globalMinioDefaultRegion, ErrNone},
// Test case - 3.
{"eu-central-1", globalMinioDefaultRegion, ErrInvalidRegion},
}
for i, testCase := range testCases {
inputRequest, e := createExpectedRequest(&http.Request{}, testCase.locationForInputRequest)
@@ -77,7 +75,7 @@ func TestIsValidLocationContraint(t *testing.T) {
t.Fatalf("Test %d: Failed to Marshal bucket configuration", i+1)
}
serverConfig.SetRegion(testCase.serverConfigRegion)
actualCode := isValidLocationConstraint(inputRequest)
_, actualCode := parseLocationConstraint(inputRequest)
if testCase.expectedCode != actualCode {
t.Errorf("Test %d: Expected the APIErrCode to be %d, but instead found %d", i+1, testCase.expectedCode, actualCode)
}

196
cmd/http-stats.go Normal file
View File

@@ -0,0 +1,196 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/http"
"time"
"go.uber.org/atomic"
)
// ConnStats - Network statistics
// Counts total input/output bytes transferred during
// the server's life.
type ConnStats struct {
totalInputBytes atomic.Uint64
totalOutputBytes atomic.Uint64
}
// Increase total input bytes
func (s *ConnStats) incInputBytes(n int) {
s.totalInputBytes.Add(uint64(n))
}
// Increase total output bytes
func (s *ConnStats) incOutputBytes(n int) {
s.totalOutputBytes.Add(uint64(n))
}
// Return total input bytes
func (s *ConnStats) getTotalInputBytes() uint64 {
return s.totalInputBytes.Load()
}
// Return total output bytes
func (s *ConnStats) getTotalOutputBytes() uint64 {
return s.totalOutputBytes.Load()
}
// Return connection stats (total input/output bytes)
func (s *ConnStats) toServerConnStats() ServerConnStats {
return ServerConnStats{
TotalInputBytes: s.getTotalInputBytes(),
TotalOutputBytes: s.getTotalOutputBytes(),
}
}
// Prepare new ConnStats structure
func newConnStats() *ConnStats {
return &ConnStats{}
}
// HTTPMethodStats holds statistics information about
// a given HTTP method made by all clients
type HTTPMethodStats struct {
Counter atomic.Uint64
Duration atomic.Float64
}
// HTTPStats holds statistics information about
// HTTP requests made by all clients
type HTTPStats struct {
// HEAD request stats.
totalHEADs HTTPMethodStats
successHEADs HTTPMethodStats
// GET request stats.
totalGETs HTTPMethodStats
successGETs HTTPMethodStats
// PUT request stats.
totalPUTs HTTPMethodStats
successPUTs HTTPMethodStats
// POST request stats.
totalPOSTs HTTPMethodStats
successPOSTs HTTPMethodStats
// DELETE request stats.
totalDELETEs HTTPMethodStats
successDELETEs HTTPMethodStats
}
func durationStr(totalDuration, totalCount float64) string {
if totalCount == 0 {
return "0s" // avoid NaN from dividing by zero before any request is seen
}
return fmt.Sprint(time.Duration(totalDuration/totalCount) * time.Second)
}
// Converts http stats into struct to be sent back to the client.
func (st HTTPStats) toServerHTTPStats() ServerHTTPStats {
serverStats := ServerHTTPStats{}
serverStats.TotalHEADStats = ServerHTTPMethodStats{
Count: st.totalHEADs.Counter.Load(),
AvgDuration: durationStr(st.totalHEADs.Duration.Load(), float64(st.totalHEADs.Counter.Load())),
}
serverStats.SuccessHEADStats = ServerHTTPMethodStats{
Count: st.successHEADs.Counter.Load(),
AvgDuration: durationStr(st.successHEADs.Duration.Load(), float64(st.successHEADs.Counter.Load())),
}
serverStats.TotalGETStats = ServerHTTPMethodStats{
Count: st.totalGETs.Counter.Load(),
AvgDuration: durationStr(st.totalGETs.Duration.Load(), float64(st.totalGETs.Counter.Load())),
}
serverStats.SuccessGETStats = ServerHTTPMethodStats{
Count: st.successGETs.Counter.Load(),
AvgDuration: durationStr(st.successGETs.Duration.Load(), float64(st.successGETs.Counter.Load())),
}
serverStats.TotalPUTStats = ServerHTTPMethodStats{
Count: st.totalPUTs.Counter.Load(),
AvgDuration: durationStr(st.totalPUTs.Duration.Load(), float64(st.totalPUTs.Counter.Load())),
}
serverStats.SuccessPUTStats = ServerHTTPMethodStats{
Count: st.successPUTs.Counter.Load(),
AvgDuration: durationStr(st.successPUTs.Duration.Load(), float64(st.successPUTs.Counter.Load())),
}
serverStats.TotalPOSTStats = ServerHTTPMethodStats{
Count: st.totalPOSTs.Counter.Load(),
AvgDuration: durationStr(st.totalPOSTs.Duration.Load(), float64(st.totalPOSTs.Counter.Load())),
}
serverStats.SuccessPOSTStats = ServerHTTPMethodStats{
Count: st.successPOSTs.Counter.Load(),
AvgDuration: durationStr(st.successPOSTs.Duration.Load(), float64(st.successPOSTs.Counter.Load())),
}
serverStats.TotalDELETEStats = ServerHTTPMethodStats{
Count: st.totalDELETEs.Counter.Load(),
AvgDuration: durationStr(st.totalDELETEs.Duration.Load(), float64(st.totalDELETEs.Counter.Load())),
}
serverStats.SuccessDELETEStats = ServerHTTPMethodStats{
Count: st.successDELETEs.Counter.Load(),
AvgDuration: durationStr(st.successDELETEs.Duration.Load(), float64(st.successDELETEs.Counter.Load())),
}
return serverStats
}
// Update statistics from http request and response data
func (st *HTTPStats) updateStats(r *http.Request, w *httpResponseRecorder, durationSecs float64) {
// A successful request has a 2xx response code
successReq := (w.respStatusCode >= 200 && w.respStatusCode < 300)
// Update stats according to method verb
switch r.Method {
case "HEAD":
st.totalHEADs.Counter.Inc()
st.totalHEADs.Duration.Add(durationSecs)
if successReq {
st.successHEADs.Counter.Inc()
st.successHEADs.Duration.Add(durationSecs)
}
case "GET":
st.totalGETs.Counter.Inc()
st.totalGETs.Duration.Add(durationSecs)
if successReq {
st.successGETs.Counter.Inc()
st.successGETs.Duration.Add(durationSecs)
}
case "PUT":
st.totalPUTs.Counter.Inc()
st.totalPUTs.Duration.Add(durationSecs)
if successReq {
st.successPUTs.Counter.Inc()
st.successPUTs.Duration.Add(durationSecs)
}
case "POST":
st.totalPOSTs.Counter.Inc()
st.totalPOSTs.Duration.Add(durationSecs)
if successReq {
st.successPOSTs.Counter.Inc()
st.successPOSTs.Duration.Add(durationSecs)
}
case "DELETE":
st.totalDELETEs.Counter.Inc()
st.totalDELETEs.Duration.Add(durationSecs)
if successReq {
st.successDELETEs.Counter.Inc()
st.successDELETEs.Duration.Add(durationSecs)
}
}
}
// Prepare new HTTPStats structure
func newHTTPStats() *HTTPStats {
return &HTTPStats{}
}
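Putting the pieces together, the httpStatsHandler shown earlier drives these counters roughly like this (a fragment; w, r and handler come from the surrounding middleware):

// Sketch: how one request updates HTTPStats.
stats := newHTTPStats()
rec := &httpResponseRecorder{ResponseWriter: w}
tBefore := UTCNow()
handler.ServeHTTP(rec, r)
stats.updateStats(r, rec, UTCNow().Sub(tBefore).Seconds())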

View File

@@ -1,88 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"math"
"time"
)
// humanizedDuration container to capture humanized time.
type humanizedDuration struct {
Days int64 `json:"days,omitempty"`
Hours int64 `json:"hours,omitempty"`
Minutes int64 `json:"minutes,omitempty"`
Seconds int64 `json:"seconds,omitempty"`
}
// StringShort() humanizes humanizedDuration to human readable short format.
// This does not print seconds.
func (r humanizedDuration) StringShort() string {
if r.Days == 0 && r.Hours == 0 {
return fmt.Sprintf("%d minutes", r.Minutes)
}
if r.Days == 0 {
return fmt.Sprintf("%d hours %d minutes", r.Hours, r.Minutes)
}
return fmt.Sprintf("%d days %d hours %d minutes", r.Days, r.Hours, r.Minutes)
}
// String() humanizes humanizedDuration to human readable,
func (r humanizedDuration) String() string {
if r.Days == 0 && r.Hours == 0 && r.Minutes == 0 {
return fmt.Sprintf("%d seconds", r.Seconds)
}
if r.Days == 0 && r.Hours == 0 {
return fmt.Sprintf("%d minutes %d seconds", r.Minutes, r.Seconds)
}
if r.Days == 0 {
return fmt.Sprintf("%d hours %d minutes %d seconds", r.Hours, r.Minutes, r.Seconds)
}
return fmt.Sprintf("%d days %d hours %d minutes %d seconds", r.Days, r.Hours, r.Minutes, r.Seconds)
}
// timeDurationToHumanizedDuration convert golang time.Duration to a custom more readable humanizedDuration.
func timeDurationToHumanizedDuration(duration time.Duration) humanizedDuration {
r := humanizedDuration{}
if duration.Seconds() < 60.0 {
r.Seconds = int64(duration.Seconds())
return r
}
if duration.Minutes() < 60.0 {
remainingSeconds := math.Mod(duration.Seconds(), 60)
r.Seconds = int64(remainingSeconds)
r.Minutes = int64(duration.Minutes())
return r
}
if duration.Hours() < 24.0 {
remainingMinutes := math.Mod(duration.Minutes(), 60)
remainingSeconds := math.Mod(duration.Seconds(), 60)
r.Seconds = int64(remainingSeconds)
r.Minutes = int64(remainingMinutes)
r.Hours = int64(duration.Hours())
return r
}
remainingHours := math.Mod(duration.Hours(), 24)
remainingMinutes := math.Mod(duration.Minutes(), 60)
remainingSeconds := math.Mod(duration.Seconds(), 60)
r.Hours = int64(remainingHours)
r.Minutes = int64(remainingMinutes)
r.Seconds = int64(remainingSeconds)
r.Days = int64(duration.Hours() / 24)
return r
}

View File

@@ -1,75 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
"time"
)
// Test humanized duration.
func TestHumanizedDuration(t *testing.T) {
duration := time.Duration(90487000000000)
humanDuration := timeDurationToHumanizedDuration(duration)
if !hasSuffix(humanDuration.String(), "seconds") {
t.Fatal("Stringer method for humanized duration should have seconds.", humanDuration.String())
}
if hasSuffix(humanDuration.StringShort(), "seconds") {
t.Fatal("StringShorter method for humanized duration should not have seconds.", humanDuration.StringShort())
}
// Test humanized duration for seconds.
humanSecDuration := timeDurationToHumanizedDuration(time.Duration(5 * time.Second))
expectedHumanSecDuration := humanizedDuration{
Seconds: 5,
}
if humanSecDuration != expectedHumanSecDuration {
t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form",
expectedHumanSecDuration, humanSecDuration)
}
if hasSuffix(humanSecDuration.String(), "days") ||
hasSuffix(humanSecDuration.String(), "hours") ||
hasSuffix(humanSecDuration.String(), "minutes") {
t.Fatal("Stringer method for humanized duration should have only seconds.", humanSecDuration.String())
}
// Test humanized duration for minutes.
humanMinDuration := timeDurationToHumanizedDuration(10 * time.Minute)
expectedHumanMinDuration := humanizedDuration{
Minutes: 10,
}
if humanMinDuration != expectedHumanMinDuration {
t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form",
expectedHumanMinDuration, humanMinDuration)
}
if hasSuffix(humanMinDuration.String(), "hours") {
t.Fatal("Stringer method for humanized duration should have only minutes.", humanMinDuration.String())
}
// Test humanized duration for hours.
humanHourDuration := timeDurationToHumanizedDuration(10 * time.Hour)
expectedHumanHourDuration := humanizedDuration{
Hours: 10,
}
if humanHourDuration != expectedHumanHourDuration {
t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form",
expectedHumanHourDuration, humanHourDuration)
}
if hasSuffix(humanHourDuration.String(), "days") {
t.Fatal("Stringer method for humanized duration should have hours.", humanHourDuration.String())
}
}

View File

@@ -1,69 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net"
"sort"
)
// byLastOctetValue implements sort.Interface used in sorting a list
// of ip address by their last octet value.
type byLastOctetValue []net.IP
func (n byLastOctetValue) Len() int { return len(n) }
func (n byLastOctetValue) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
func (n byLastOctetValue) Less(i, j int) bool {
return []byte(n[i].To4())[3] < []byte(n[j].To4())[3]
}
// getInterfaceIPv4s is synonymous to net.InterfaceAddrs()
// returns net.IP IPv4 only representation of the net.Addr.
// Additionally the returned list is sorted by their last
// octet value.
//
// [The logic to sort by last octet is implemented to
// prefer CIDRs with higher octets; this in turn avoids the
// localhost/loopback address being preferred as the
// first IP on the list. Subsequently this list helps us print
// a user friendly message with appropriate values].
func getInterfaceIPv4s() ([]net.IP, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Unable to determine network interface address. %s", err)
}
// Go through each return network address and collate IPv4 addresses.
var nips []net.IP
for _, addr := range addrs {
if addr.Network() == "ip+net" {
var nip net.IP
// Attempt to parse the addr through CIDR.
nip, _, err = net.ParseCIDR(addr.String())
if err != nil {
return nil, fmt.Errorf("Unable to parse addrss %s, error %s", addr, err)
}
// Collect only IPv4 addrs.
if nip.To4() != nil {
nips = append(nips, nip)
}
}
}
// Sort the list of IPs by their last octet value.
sort.Sort(sort.Reverse(byLastOctetValue(nips)))
return nips, nil
}

View File

@@ -59,7 +59,7 @@ func authenticateJWT(accessKey, secretKey string, expiry time.Duration) (string,
return "", errAuthentication
}
utcNow := time.Now().UTC()
utcNow := UTCNow()
token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims{
"exp": utcNow.Add(expiry).Unix(),
"iat": utcNow.Unix(),

View File

@@ -54,7 +54,7 @@ func (initialSnapShot LeakDetect) DetectLeak(t TestErrHandler) {
}
// Loop, waiting for goroutines to shut down.
// Wait up to 5 seconds, but finish as quickly as possible.
deadline := time.Now().UTC().Add(leakDetectDeadline * time.Second)
deadline := UTCNow().Add(leakDetectDeadline * time.Second)
for {
// Get stack snapshot of relevant goroutines.
leaked := initialSnapShot.CompareCurrentSnapshot()
@@ -63,7 +63,7 @@ func (initialSnapShot LeakDetect) DetectLeak(t TestErrHandler) {
return
}
// Wait and test again until the deadline.
if time.Now().UTC().Before(deadline) {
if UTCNow().Before(deadline) {
time.Sleep(leakDetectPauseTimeMs * time.Millisecond)
continue
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -157,7 +157,7 @@ func newDebugLockInfo(lockSource string, status statusType, readLock bool) debug
lockSource: lockSource,
lType: lType,
status: status,
since: time.Now().UTC(),
since: UTCNow(),
}
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,10 +16,7 @@
package cmd
import (
"testing"
"time"
)
import "testing"
type lockStateCase struct {
volume string
@@ -144,7 +141,6 @@ func getSystemLockState() (SystemLockState, error) {
LockType: lockInfo.lType,
Status: lockInfo.status,
Since: lockInfo.since,
Duration: time.Now().UTC().Sub(lockInfo.since),
})
}
lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo)
@@ -414,7 +410,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
globalNSMutex.debugLockMap[param].lockInfo[testCases[0].opsID] = debugLockInfo{
lockSource: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
status: "Running", // State set to "Running". Should fail with `LockInfoStateNotBlocked`.
since: time.Now().UTC(),
since: UTCNow(),
}
actualErr = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource,

View File

@@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"testing"
"time"
"github.com/minio/dsync"
)
@@ -29,10 +28,10 @@ func TestLockRPCClient(t *testing.T) {
lkClient := newLockRPCClient(authConfig{
accessKey: "abcd",
secretKey: "abcd123",
serverAddr: fmt.Sprintf("%X", time.Now().UTC().UnixNano()),
serviceEndpoint: pathJoin(lockRPCPath, "/test/1"),
serverAddr: fmt.Sprintf("%X", UTCNow().UnixNano()),
serviceEndpoint: pathJoin(lockServicePath, "/test/1"),
secureConn: false,
serviceName: "Dsync",
serviceName: lockServiceName,
})
// Attempt all calls.

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
// Remove failed, in case it is a:
if nlrip.lri.writer {
// Writer: this should never happen as the whole (mapped) entry should have been deleted
errorIf(errors.New(""), "Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri.uid, lri)
errorIf(errors.New(""), "Lock maintenance failed to remove entry for write lock (should never happen) %#v %#v %#v", nlrip.name, nlrip.lri.uid, lri)
} // Reader: this can happen if multiple read locks were active and
// the one we are looking for has been released concurrently (so it is fine).
} // Removal went okay, all is fine.
@@ -66,7 +66,7 @@ func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration)
// Check whether enough time has gone by since last check
if time.Since(lriArray[idx].timeLastCheck) >= interval {
rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]})
lriArray[idx].timeLastCheck = time.Now().UTC()
lriArray[idx].timeLastCheck = UTCNow()
}
}
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,12 +28,12 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
defer removeAll(testPath)
lri := lockRequesterInfo{
writer: false,
node: "host",
rpcPath: "rpc-path",
uid: "0123-4567",
timestamp: time.Now().UTC(),
timeLastCheck: time.Now().UTC(),
writer: false,
node: "host",
serviceEndpoint: "rpc-path",
uid: "0123-4567",
timestamp: UTCNow(),
timeLastCheck: UTCNow(),
}
nlrip := nameLockRequesterInfoPair{name: "name", lri: lri}
@@ -65,20 +65,20 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
defer removeAll(testPath)
lockRequesterInfo1 := lockRequesterInfo{
writer: true,
node: "host",
rpcPath: "rpc-path",
uid: "0123-4567",
timestamp: time.Now().UTC(),
timeLastCheck: time.Now().UTC(),
writer: true,
node: "host",
serviceEndpoint: "rpc-path",
uid: "0123-4567",
timestamp: UTCNow(),
timeLastCheck: UTCNow(),
}
lockRequesterInfo2 := lockRequesterInfo{
writer: true,
node: "host",
rpcPath: "rpc-path",
uid: "89ab-cdef",
timestamp: time.Now().UTC(),
timeLastCheck: time.Now().UTC(),
writer: true,
node: "host",
serviceEndpoint: "rpc-path",
uid: "89ab-cdef",
timestamp: UTCNow(),
timeLastCheck: UTCNow(),
}
locker.lockMap["name"] = []lockRequesterInfo{
@@ -116,7 +116,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
// Tests function returning long lived locks.
func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
ut := time.Now().UTC()
ut := UTCNow()
// Collection of test cases for verifying returning valid long lived locks.
testCases := []struct {
lockMap map[string][]lockRequesterInfo
@@ -127,12 +127,12 @@ func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
{
lockMap: map[string][]lockRequesterInfo{
"test": {{
writer: true,
node: "10.1.10.21",
rpcPath: "/lock/mnt/disk1",
uid: "10000112",
timestamp: ut,
timeLastCheck: ut,
writer: true,
node: "10.1.10.21",
serviceEndpoint: "/lock/mnt/disk1",
uid: "10000112",
timestamp: ut,
timeLastCheck: ut,
}},
},
lockInterval: 1 * time.Minute,
@@ -142,12 +142,12 @@ func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
{
lockMap: map[string][]lockRequesterInfo{
"test": {{
writer: true,
node: "10.1.10.21",
rpcPath: "/lock/mnt/disk1",
uid: "10000112",
timestamp: ut,
timeLastCheck: ut.Add(-2 * time.Minute),
writer: true,
node: "10.1.10.21",
serviceEndpoint: "/lock/mnt/disk1",
uid: "10000112",
timestamp: ut,
timeLastCheck: ut.Add(-2 * time.Minute),
}},
},
lockInterval: 1 * time.Minute,
@@ -155,12 +155,12 @@ func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
{
name: "test",
lri: lockRequesterInfo{
writer: true,
node: "10.1.10.21",
rpcPath: "/lock/mnt/disk1",
uid: "10000112",
timestamp: ut,
timeLastCheck: ut.Add(-2 * time.Minute),
writer: true,
node: "10.1.10.21",
serviceEndpoint: "/lock/mnt/disk1",
uid: "10000112",
timestamp: ut,
timeLastCheck: ut.Add(-2 * time.Minute),
},
},
},

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,7 +30,10 @@ import (
const (
// Lock rpc server endpoint.
lockRPCPath = "/lock"
lockServicePath = "/lock"
// Lock rpc service name.
lockServiceName = "Dsync"
// Lock maintenance interval.
lockMaintenanceInterval = 1 * time.Minute // 1 minute.
@@ -39,17 +42,17 @@ const (
lockValidityCheckInterval = 2 * time.Minute // 2 minutes.
)
// lockRequesterInfo stores various info from the client for each lock that is requested
// lockRequesterInfo stores various info from the client for each lock that is requested.
type lockRequesterInfo struct {
writer bool // Bool whether write or read lock
node string // Network address of client claiming lock
rpcPath string // RPC path of client claiming lock
uid string // Uid to uniquely identify request of client
timestamp time.Time // Timestamp set at the time of initialization
timeLastCheck time.Time // Timestamp for last check of validity of lock
writer bool // Bool whether write or read lock.
node string // Network address of client claiming lock.
serviceEndpoint string // RPC path of client claiming lock.
uid string // UID to uniquely identify request of client.
timestamp time.Time // Timestamp set at the time of initialization.
timeLastCheck time.Time // Timestamp for last check of validity of lock.
}
// isWriteLock returns whether the lock is a write or read lock
// isWriteLock returns whether the lock is a write or read lock.
func isWriteLock(lri []lockRequesterInfo) bool {
return len(lri) == 1 && lri[0].writer
}
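The invariant behind `isWriteLock`: a write lock is stored as a single-element slice whose only entry has `writer == true`; a slice with several entries can only hold readers. For illustration (hypothetical field values, same package):

```go
writeHeld := []lockRequesterInfo{{writer: true, uid: "0123-4567"}}
readsHeld := []lockRequesterInfo{{writer: false, uid: "89ab"}, {writer: false, uid: "cdef"}}

isWriteLock(writeHeld) // true: exactly one entry, and it is a writer
isWriteLock(readsHeld) // false: multiple entries imply read locks
```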
@@ -57,9 +60,9 @@ func isWriteLock(lri []lockRequesterInfo) bool {
// lockServer is a type for RPC handlers
type lockServer struct {
AuthRPCServer
rpcPath string
mutex sync.Mutex
lockMap map[string][]lockRequesterInfo
serviceEndpoint string
mutex sync.Mutex
lockMap map[string][]lockRequesterInfo
}
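Every RPC handler on `lockServer` follows the same concurrency discipline: take `mutex`, inspect or mutate `lockMap`, release. A condensed sketch of the pattern, mirroring the handlers in this file (the helper name is hypothetical):

```go
func (l *lockServer) isHeld(resource string) (held bool) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	_, held = l.lockMap[resource] // read under the lock, like every handler
	return held
}
```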
// Start lock maintenance from all lock servers.
@@ -87,9 +90,9 @@ func startLockMaintainence(lockServers []*lockServer) {
}
// Register distributed NS lock handlers.
func registerDistNSLockRouter(mux *router.Router, serverConfig serverCmdConfig) error {
func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error {
// Initialize a new set of lock servers.
lockServers := newLockServers(serverConfig)
lockServers := newLockServers(endpoints)
// Start lock maintenance from all lock servers.
startLockMaintainence(lockServers)
@@ -99,19 +102,18 @@ func registerDistNSLockRouter(mux *router.Router, serverConfig serverCmdConfig)
}
// Create one lock server for every local storage rpc server.
func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) {
for _, ep := range srvConfig.endpoints {
func newLockServers(endpoints EndpointList) (lockServers []*lockServer) {
for _, endpoint := range endpoints {
// Initialize new lock server for each local node.
if isLocalStorage(ep) {
// Create handler for lock RPCs
locker := &lockServer{
rpcPath: getPath(ep),
mutex: sync.Mutex{},
lockMap: make(map[string][]lockRequesterInfo),
}
lockServers = append(lockServers, locker)
if endpoint.IsLocal {
lockServers = append(lockServers, &lockServer{
serviceEndpoint: endpoint.Path,
mutex: sync.Mutex{},
lockMap: make(map[string][]lockRequesterInfo),
})
}
}
return lockServers
}
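Only endpoints flagged `IsLocal` get a lock server, so on a multi-node setup each node exports lock state solely for its own disks. A usage sketch following the same pattern as the test further below (`mustGetNewEndpointList` and the manual `IsLocal` marking are taken from that test):

```go
endpoints := mustGetNewEndpointList(
	"http://localhost:9000/mnt/disk1",
	"http://1.1.1.2:9000/mnt/disk2",
)
for i := range endpoints {
	if endpoints[i].Host == "localhost:9000" {
		endpoints[i].IsLocal = true
	}
}
lockServers := newLockServers(endpoints)
// Expect exactly one lock server, with serviceEndpoint "/mnt/disk1".
```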
@@ -119,11 +121,11 @@ func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) {
func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error {
for _, lockServer := range lockServers {
lockRPCServer := rpc.NewServer()
if err := lockRPCServer.RegisterName("Dsync", lockServer); err != nil {
if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil {
return traceError(err)
}
lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
lockRouter.Path(path.Join(lockRPCPath, lockServer.rpcPath)).Handler(lockRPCServer)
lockRouter.Path(path.Join(lockServicePath, lockServer.serviceEndpoint)).Handler(lockRPCServer)
}
return nil
}
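Each local lock server is thus registered under its own route; assuming `minioReservedBucketPath` is `"/minio"` (that constant is not shown in this diff), a disk at `/mnt/disk1` would be served at `/minio/lock/mnt/disk1` by an RPC service named `Dsync`. A standalone sketch of the path composition:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Assumed value: minioReservedBucketPath is not part of this diff.
	const minioReservedBucketPath = "/minio"
	const lockServicePath = "/lock"
	fmt.Println(path.Join(minioReservedBucketPath, lockServicePath, "/mnt/disk1"))
	// Output: /minio/lock/mnt/disk1
}
```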
@@ -141,12 +143,12 @@ func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
if !*reply { // No locks held on the given name, so claim write lock
l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{
{
writer: true,
node: args.LockArgs.ServerAddr,
rpcPath: args.LockArgs.ServiceEndpoint,
uid: args.LockArgs.UID,
timestamp: time.Now().UTC(),
timeLastCheck: time.Now().UTC(),
writer: true,
node: args.LockArgs.ServerAddr,
serviceEndpoint: args.LockArgs.ServiceEndpoint,
uid: args.LockArgs.UID,
timestamp: UTCNow(),
timeLastCheck: UTCNow(),
},
}
}
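`Lock` is claim-if-absent: `*reply` comes from the map lookup, and the single-writer entry is inserted only when no locks exist on the resource. A client-side sketch (assuming, from the field accesses above, that `LockArgs` wraps `dsync.LockArgs`):

```go
args := LockArgs{LockArgs: dsync.LockArgs{
	UID:             "0123-4567",
	Resource:        "name",
	ServerAddr:      "node",
	ServiceEndpoint: "rpc-path",
}}
var granted bool
if err := locker.Lock(&args, &granted); err != nil {
	// handle RPC/validation error
}
// granted is true on the first call; a second Lock on "name" with a
// different UID stays false until the holder calls Unlock.
```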
@@ -182,12 +184,12 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
return err
}
lrInfo := lockRequesterInfo{
writer: false,
node: args.LockArgs.ServerAddr,
rpcPath: args.LockArgs.ServiceEndpoint,
uid: args.LockArgs.UID,
timestamp: time.Now().UTC(),
timeLastCheck: time.Now().UTC(),
writer: false,
node: args.LockArgs.ServerAddr,
serviceEndpoint: args.LockArgs.ServiceEndpoint,
uid: args.LockArgs.UID,
timestamp: UTCNow(),
timeLastCheck: UTCNow(),
}
if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
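The read path is the mirror image: an `RLock` is granted unless the existing entry is a write lock, and each granted reader appends its own `lockRequesterInfo`, so several readers coexist on one resource. A sketch of the observable behavior (`readerA`, `readerB`, and `writer` are hypothetical `LockArgs` values):

```go
var ok bool
_ = locker.RLock(&readerA, &ok) // ok == true: nothing held yet
_ = locker.RLock(&readerB, &ok) // ok == true: readers stack up
_ = locker.Lock(&writer, &ok)   // ok == false while readers remain
```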
@@ -288,13 +290,16 @@ func (l *lockServer) lockMaintenance(interval time.Duration) {
accessKey: serverCred.AccessKey,
secretKey: serverCred.SecretKey,
serverAddr: nlrip.lri.node,
serviceEndpoint: nlrip.lri.rpcPath,
secureConn: globalIsSSL,
serviceName: "Dsync",
serviceEndpoint: nlrip.lri.serviceEndpoint,
serviceName: lockServiceName,
})
// Call back to the original server to verify whether the lock is still active (based on name & uid)
expired, _ := c.Expired(dsync.LockArgs{UID: nlrip.lri.uid, Resource: nlrip.name})
expired, _ := c.Expired(dsync.LockArgs{
UID: nlrip.lri.uid,
Resource: nlrip.name,
})
// Close the connection regardless of the call response.
c.rpcClient.Close()
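Maintenance is therefore a liveness probe: for every long-lived lock, the server dials the node that claimed it and asks whether the `(resource, uid)` pair is still active. What follows this hunk is presumably the cleanup step; a sketch under that assumption (the helper name is hypothetical, not shown in this diff):

```go
if expired {
	// Drop the stale entry; the original holder no longer tracks it.
	l.mutex.Lock()
	l.removeEntryIfExists(nlrip) // hypothetical cleanup helper
	l.mutex.Unlock()
}
```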

View File

@@ -17,11 +17,9 @@
package cmd
import (
"net/url"
"runtime"
"sync"
"testing"
"time"
"github.com/minio/dsync"
)
@@ -35,7 +33,7 @@ func testLockEquality(lriLeft, lriRight []lockRequesterInfo) bool {
for i := 0; i < len(lriLeft); i++ {
if lriLeft[i].writer != lriRight[i].writer ||
lriLeft[i].node != lriRight[i].node ||
lriLeft[i].rpcPath != lriRight[i].rpcPath ||
lriLeft[i].serviceEndpoint != lriRight[i].serviceEndpoint ||
lriLeft[i].uid != lriRight[i].uid {
return false
}
@@ -51,17 +49,17 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) {
}
locker := &lockServer{
AuthRPCServer: AuthRPCServer{},
rpcPath: "rpc-path",
mutex: sync.Mutex{},
lockMap: make(map[string][]lockRequesterInfo),
AuthRPCServer: AuthRPCServer{},
serviceEndpoint: "rpc-path",
mutex: sync.Mutex{},
lockMap: make(map[string][]lockRequesterInfo),
}
creds := serverConfig.GetCredential()
loginArgs := LoginRPCArgs{
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
RequestTime: UTCNow(),
}
loginReply := LoginRPCReply{}
err = locker.Login(&loginArgs, &loginReply)
@@ -98,10 +96,10 @@ func TestLockRpcServerLock(t *testing.T) {
gotLri, _ := locker.lockMap["name"]
expectedLri := []lockRequesterInfo{
{
writer: true,
node: "node",
rpcPath: "rpc-path",
uid: "0123-4567",
writer: true,
node: "node",
serviceEndpoint: "rpc-path",
uid: "0123-4567",
},
}
if !testLockEquality(expectedLri, gotLri) {
@@ -199,10 +197,10 @@ func TestLockRpcServerRLock(t *testing.T) {
gotLri, _ := locker.lockMap["name"]
expectedLri := []lockRequesterInfo{
{
writer: false,
node: "node",
rpcPath: "rpc-path",
uid: "0123-4567",
writer: false,
node: "node",
serviceEndpoint: "rpc-path",
uid: "0123-4567",
},
}
if !testLockEquality(expectedLri, gotLri) {
@@ -286,10 +284,10 @@ func TestLockRpcServerRUnlock(t *testing.T) {
gotLri, _ := locker.lockMap["name"]
expectedLri := []lockRequesterInfo{
{
writer: false,
node: "node",
rpcPath: "rpc-path",
uid: "89ab-cdef",
writer: false,
node: "node",
serviceEndpoint: "rpc-path",
uid: "89ab-cdef",
},
}
if !testLockEquality(expectedLri, gotLri) {
@@ -434,66 +432,46 @@ func TestLockServers(t *testing.T) {
globalIsDistXL = currentIsDistXL
}()
case1Endpoints := mustGetNewEndpointList(
"http://localhost:9000/mnt/disk1",
"http://1.1.1.2:9000/mnt/disk2",
"http://1.1.2.1:9000/mnt/disk3",
"http://1.1.2.2:9000/mnt/disk4",
)
for i := range case1Endpoints {
if case1Endpoints[i].Host == "localhost:9000" {
case1Endpoints[i].IsLocal = true
}
}
case2Endpoints := mustGetNewEndpointList(
"http://localhost:9000/mnt/disk1",
"http://localhost:9000/mnt/disk2",
"http://1.1.2.1:9000/mnt/disk3",
"http://1.1.2.2:9000/mnt/disk4",
)
for i := range case2Endpoints {
if case2Endpoints[i].Host == "localhost:9000" {
case2Endpoints[i].IsLocal = true
}
}
globalMinioHost = ""
testCases := []struct {
isDistXL bool
srvCmdConfig serverCmdConfig
endpoints EndpointList
totalLockServers int
}{
// Test - 1 one lock server initialized.
{
isDistXL: true,
srvCmdConfig: serverCmdConfig{
endpoints: []*url.URL{{
Scheme: httpScheme,
Host: "localhost:9000",
Path: "/mnt/disk1",
}, {
Scheme: httpScheme,
Host: "1.1.1.2:9000",
Path: "/mnt/disk2",
}, {
Scheme: httpScheme,
Host: "1.1.2.1:9000",
Path: "/mnt/disk3",
}, {
Scheme: httpScheme,
Host: "1.1.2.2:9000",
Path: "/mnt/disk4",
}},
},
totalLockServers: 1,
},
{true, case1Endpoints, 1},
// Test - 2 two servers possible.
{
isDistXL: true,
srvCmdConfig: serverCmdConfig{
endpoints: []*url.URL{{
Scheme: httpScheme,
Host: "localhost:9000",
Path: "/mnt/disk1",
}, {
Scheme: httpScheme,
Host: "localhost:9000",
Path: "/mnt/disk2",
}, {
Scheme: httpScheme,
Host: "1.1.2.1:9000",
Path: "/mnt/disk3",
}, {
Scheme: httpScheme,
Host: "1.1.2.2:9000",
Path: "/mnt/disk4",
}},
},
totalLockServers: 2,
},
{true, case2Endpoints, 2},
}
// Validates lock server initialization.
for i, testCase := range testCases {
globalIsDistXL = testCase.isDistXL
lockServers := newLockServers(testCase.srvCmdConfig)
lockServers := newLockServers(testCase.endpoints)
if len(lockServers) != testCase.totalLockServers {
t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(lockServers))
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -57,12 +57,11 @@ type VolumeLockInfo struct {
// OpsLockState - structure to fill in state information of the lock.
// Structure to fill in status information for each operation with a given operation ID.
type OpsLockState struct {
OperationID string `json:"id"` // String containing operation ID.
LockSource string `json:"source"` // Operation type (GetObject, PutObject...)
LockType lockType `json:"type"` // Lock type (RLock, WLock)
Status statusType `json:"status"` // Status can be Running/Ready/Blocked.
Since time.Time `json:"since"` // Time when the lock was initially held.
Duration time.Duration `json:"duration"` // Duration since the lock was held.
OperationID string `json:"id"` // String containing operation ID.
LockSource string `json:"source"` // Operation type (GetObject, PutObject...)
LockType lockType `json:"type"` // Lock type (RLock, WLock)
Status statusType `json:"status"` // Status can be Running/Ready/Blocked.
Since time.Time `json:"since"` // Time when the lock was initially held.
}
// listLocksInfo - Fetches locks held on bucket, matching prefix held for longer than duration.
@@ -71,7 +70,7 @@ func listLocksInfo(bucket, prefix string, duration time.Duration) []VolumeLockIn
defer globalNSMutex.lockMapMutex.Unlock()
// Fetch current time once instead of fetching system time for every lock.
timeNow := time.Now().UTC()
timeNow := UTCNow()
volumeLocks := []VolumeLockInfo{}
for param, debugLock := range globalNSMutex.debugLockMap {
@@ -105,7 +104,6 @@ func listLocksInfo(bucket, prefix string, duration time.Duration) []VolumeLockIn
LockType: lockInfo.lType,
Status: lockInfo.status,
Since: lockInfo.since,
Duration: elapsed,
})
volumeLocks = append(volumeLocks, volLockInfo)
}
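With `Duration` gone from `OpsLockState`, elapsed time is no longer stored per operation; a consumer that still wants it can derive it from `Since`. A minimal sketch, where `op` stands for any `OpsLockState` value returned via `listLocksInfo`:

```go
elapsed := UTCNow().Sub(op.Since) // recompute on demand instead of storing it
```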

Some files were not shown because too many files have changed in this diff.