Compare commits

...

105 Commits

Author SHA1 Message Date
Minio Trusted
1b472dae78 Bump to new release. 2017-04-28 17:58:49 -07:00
Krishna Srinivas
eb50175ad9 gateway: reject bad path segments in URL (#4202) 2017-04-28 17:26:13 -07:00
Krishna Srinivas
e85349381e gateway: Fix help message for gateway (#4201) 2017-04-28 17:26:00 -07:00
Krishna Srinivas
06bc68a4b3 gateway: Fix help message for custom Azure Blob Storage endpoint. (#4113) 2017-04-28 17:23:41 -07:00
Krishna Srinivas
fb506c7fca gateway: Support for custom endpoint. (#4086) 2017-04-28 17:23:23 -07:00
Harshavardhana
8a7cffe7b8 docs: macOS brew now refers to Minio fork (#4059) 2017-04-25 11:01:51 -07:00
Minio Trusted
5c85ce1afd Bump to new release. 2017-04-24 18:34:34 -07:00
Harshavardhana
710db6bdad build: Reduce binary size by using -s -w (#4027)
Refer #3939
2017-04-24 18:23:42 -07:00
Harshavardhana
058ea84605 server: Validate path for bad components in a handler. (#4170) 2017-04-24 18:22:40 -07:00
Minio Trusted
6e7d33df20 Bump to new release 2017-03-16 14:52:42 -07:00
Minio Trusted
5311eb22fd Fix dockerfile 2017-03-16 14:49:30 -07:00
Krishna Srinivas
cea4cfa3a8 Implement S3 Gateway to third party cloud storage providers. (#3756)
The currently supported backend is Azure Blob Storage.

```
export MINIO_ACCESS_KEY=azureaccountname
export MINIO_SECRET_KEY=azureaccountkey
minio gateway azure
```
2017-03-16 12:21:58 -07:00
Anis Elleuch
8426cf9aec config: Accept more address format + unit test (#3915)
checkURL() is a generic function to check whether a passed
address is valid. This commit adds support for addresses like
`m1` and `172.16.3.1`, which are needed for MySQL and NATS.
It also adds tests.
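A minimal sketch of this kind of address check, assuming nothing about the real checkURL() beyond the message above (names are illustrative):

```
package main

import (
	"errors"
	"fmt"
	"net"
	"net/url"
)

// checkAddr accepts bare hosts ("m1"), IPs ("172.16.3.1") and
// host:port pairs. Illustrative only; not the actual checkURL().
func checkAddr(addr string) error {
	if addr == "" {
		return errors.New("empty address")
	}
	// host:port form
	if host, port, err := net.SplitHostPort(addr); err == nil {
		if host == "" || port == "" {
			return errors.New("missing host or port")
		}
		return nil
	}
	// bare host name or IP
	if _, err := url.Parse("http://" + addr); err != nil {
		return err
	}
	return nil
}

func main() {
	for _, a := range []string{"m1", "172.16.3.1", "localhost:9000"} {
		fmt.Println(a, "->", checkAddr(a))
	}
}
```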
2017-03-16 11:44:01 -07:00
Harshavardhana
f3334159a4 config: Relax browser and region to be empty. (#3912)
- If the browser field is missing or empty,
  then default to "on".

- If the region field is empty or missing,
  then default to "us-east-1" (S3 spec behavior).
2017-03-16 11:06:17 -07:00
Harshavardhana
3edff1501e Vendor upstream redis library instead of our fork. (#3913)
We forked the upstream to address a build issue on Go 1.6.
That is long done, and we no longer need to maintain our fork.
2017-03-16 08:22:47 -07:00
Bala FA
21d73a3eef Simplify credential usage. (#3893) 2017-03-16 00:16:06 -07:00
Krishnan Parthasarathi
051f9bb5c6 Implement list uploads heal admin API (#3885) 2017-03-16 00:15:06 -07:00
Harshavardhana
6509589adb Use canonicalETag helper wherever needed. (#3910) 2017-03-15 20:48:49 -07:00
Anis Elleuch
ae4361cc45 config: Check for duplicated entries in all scopes (#3872)
Validate the Minio config by checking for duplicated JSON
keys at any scope level. The returned error contains the
JSON path to the duplicated key.
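A self-contained sketch of duplicate-key detection over json.Decoder tokens, in the spirit of this check (the real implementation lives elsewhere in the tree):

```
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// frame tracks one open JSON container while scanning tokens.
type frame struct {
	object bool            // true for '{', false for '['
	seen   map[string]bool // keys already seen (objects only)
	key    bool            // true when the next token must be a key
	name   string          // key under which this container was opened
}

// dupKeyPath returns the dotted path of the first duplicated object key
// in doc, or "" when every key is unique.
func dupKeyPath(doc string) string {
	dec := json.NewDecoder(strings.NewReader(doc))
	var stack []*frame
	pending := "" // key waiting for its value in the innermost object
	for {
		tok, err := dec.Token()
		if err != nil {
			return "" // EOF or malformed input: no duplicate found
		}
		if d, ok := tok.(json.Delim); ok {
			switch d {
			case '{', '[':
				stack = append(stack, &frame{
					object: d == '{',
					seen:   map[string]bool{},
					key:    d == '{',
					name:   pending,
				})
				pending = ""
			case '}', ']':
				stack = stack[:len(stack)-1]
				if n := len(stack); n > 0 && stack[n-1].object {
					stack[n-1].key = true // the container was a value
				}
			}
			continue
		}
		if n := len(stack); n > 0 && stack[n-1].object {
			f := stack[n-1]
			if f.key {
				k := tok.(string) // object keys are always strings
				if f.seen[k] {
					var parts []string
					for _, fr := range stack {
						if fr.name != "" {
							parts = append(parts, fr.name)
						}
					}
					return strings.Join(append(parts, k), ".")
				}
				f.seen[k] = true
				pending, f.key = k, false
			} else {
				f.key = true // a scalar value was consumed
			}
		}
	}
}

func main() {
	doc := `{"version": "13", "notify": {"amqp": {}, "amqp": {}}}`
	fmt.Println(dupKeyPath(doc)) // notify.amqp
}
```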
2017-03-15 16:30:34 -07:00
Krishna Srinivas
cad0d0eb7a browser: Update ui-assets.go (#3902)
fixes #3898
2017-03-15 11:32:51 -07:00
Nitish Tiwari
ba0c11757e Added that no special config changes reqd (#3906)
Added a line saying that no special config changes are required for shared mode. Also, the previous `Why Shared Backend` and the current `Use cases` sections have already been merged. This fixes the comment: https://github.com/minio/minio/pull/3888#discussion_r105573494
2017-03-15 08:22:03 -07:00
Krishna Srinivas
96050c1e21 browser: Do not show "Loading..." if there are no buckets. (#3904) 2017-03-14 19:07:23 -07:00
Rushan
a27d1b3d86 Browser: Use babel-polyfill to support new ES6 built-ins in older browsers (#3900) 2017-03-14 15:02:20 -07:00
Anis Elleuch
a5e60706a2 xl,fs: Return 404 if object ends with a separator (#3897)
HEAD Object for FS and XL was returning an invalid object
name error when an object name had a trailing slash separator.
This PR changes the behavior to always return 404 Object Not
Found, which guarantees better compatibility with the S3 spec.
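The guard distills to something like this sketch (handler and error names are illustrative, not the actual FS/XL code):

```
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errObjectNotFound = errors.New("object not found") // mapped to HTTP 404

// getObjectInfo sketches the added guard: an object name ending in the
// slash separator can never name a regular object, so answer 404 up
// front instead of leaking an invalid-name error.
func getObjectInfo(bucket, object string) error {
	if strings.HasSuffix(object, "/") {
		return errObjectNotFound
	}
	// ... look up the object on the backend (elided) ...
	return nil
}

func main() {
	fmt.Println(getObjectInfo("testbucket", "dir/")) // object not found
}
```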
2017-03-13 22:20:46 -07:00
Harshavardhana
5f7565762e api: postPolicy cleanup. Simplify the code and re-use. (#3890)
This change cleans up the postPolicyHandler code, primarily
to improve the flow and to convert certain critical parts
into self-contained functions.
2017-03-13 14:41:13 -07:00
Nitish Tiwari
3314501f19 Simplify shared mode document (#3888) 2017-03-12 16:17:03 -07:00
Harshavardhana
9d53a646a1 Simplify the title for orchestration and some words. (#3887) 2017-03-11 02:17:03 -08:00
Nitish Tiwari
5e0032e165 Update links (#3886) 2017-03-11 00:11:07 -08:00
Harshavardhana
305952d734 browser: Update ui-assets with new changes. 2017-03-10 15:13:26 -08:00
Rushan
e77885d671 Browser: Add Object.assign polyfill to support older browsers (#3884) 2017-03-10 14:30:23 -08:00
Nitish Tiwari
2410eb281e Update kafkacat command with consumer flag (#3882) 2017-03-10 09:01:59 -08:00
Harshavardhana
e54025805f browser: update ui-assets with new changes. 2017-03-09 15:26:25 -08:00
Anis Elleuch
d602495600 madmin: Do not require SSL to set credentials (#3879)
We need to relax this requirement and let the client decide
whether to allow the set-credentials API over a plain connection.
2017-03-09 14:08:33 -08:00
Nitish Tiwari
03937e7554 Added documentation for orchestration platforms (#3684) 2017-03-09 14:06:51 -08:00
Harshavardhana
e3b627a192 docker: Add ARM64 image build support (#3876) 2017-03-09 13:57:27 -08:00
Rushan
ccc3349f0c Browser: Show complete bucket name by removing the ellipsis cut off (#3877) 2017-03-09 10:42:38 -08:00
Harshavardhana
85cbd875fc cleanup: All conditionals simplified under pkg. (#3875)
Address all the changes reported/recommended by
`gosimple` tool.
2017-03-09 10:13:30 -08:00
Harshavardhana
43317530d5 Fix odd shadowing bug in XL init. (#3874)
Fixes #3873
2017-03-08 20:42:45 -08:00
Bala FA
8a9852220d Make unit testable cert parsing functions. (#3863) 2017-03-08 19:20:01 -08:00
Harshavardhana
47ac410ab0 Code cleanup - simplify server side code. (#3870)
Fix all the issues reported by `gosimple` tool.
2017-03-08 10:00:47 -08:00
Harshavardhana
433225ab0d docker: ca-certificates should not be removed. (#3868) 2017-03-07 16:30:19 -08:00
Harshavardhana
3e655a2c85 docker: Add ARM docker container dockerfile. (#3574) 2017-03-07 15:15:05 -08:00
Anis Elleuch
a2eae54d11 xl: Respect min. space by checking PrepareFile err (#3867)
It was possible to upload a big file that exceeds the minimum
free disk space limit in XL. PrepareFile was actually checking
for disk space, but we weren't checking its returned error.
This patch fixes that behavior.
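A distilled sketch of the fix, with illustrative names and thresholds:

```
package main

import (
	"errors"
	"fmt"
)

const minFreeDisk = 100 << 20 // illustrative 100 MiB threshold

var errDiskFull = errors.New("disk path full")

// prepareFile pre-allocates size bytes and fails when the write would
// drop free space below the threshold.
func prepareFile(free, size int64) error {
	if free-size < minFreeDisk {
		return errDiskFull
	}
	// ... allocate the file here (elided) ...
	return nil
}

func main() {
	// The bug: this error used to be ignored by the caller.
	if err := prepareFile(4<<30, 8<<30); err != nil {
		fmt.Println("abort upload:", err) // abort upload: disk path full
	}
}
```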
2017-03-07 14:48:56 -08:00
Anis Elleuch
79e0b9e69a Relax minio server start when disk threshold is reached and adds space check in FS (#3865)
* fs: Rename tempObjPath variable in fsCreateFile()
* fs/posix: Factor checkDiskFree() function
* fs: Add disk free check in fsCreateFile()
* posix: Move free disk check to createFile()
* xl: Relax free disk check in POSIX initialization
* fs: checkDiskFree checks for space to store data
2017-03-07 12:25:40 -08:00
Krishna Srinivas
29ff9674a0 browser: Humanize expiry time for Share-Object. (#3861) 2017-03-06 20:13:52 -08:00
Bala FA
bff4d29415 Remove commands and commandsTree global variables. (#3855) 2017-03-06 19:35:26 -08:00
Krishna Srinivas
436db49bf3 Share object expiry value modification modal was not working (#3860) 2017-03-06 18:50:25 -08:00
Rushan
966818955e Browser: Implement multiple object delete (#3859) 2017-03-06 15:43:43 -08:00
Harshavardhana
e49efcb9d9 xl: quickHeal heal bucket only when needed. (#3854)
This improves the startup time significantly
for clusters which have a lot of buckets.

Also fixes a bug where `.minio.sys` was created
on disks which do not have `format.json`.
2017-03-06 02:00:15 -08:00
Harshavardhana
6f931d29c4 rpm: Add RPM spec for minio build. (#3853)
Currently the package is built and hosted at

https://copr.fedorainfracloud.org/coprs/minio/minio/

To enable the minio repo, download one of the following repo files.

Fedora - 25
https://copr.fedorainfracloud.org/coprs/minio/minio/repo/fedora-25/minio-minio-fedora-25.repo

Fedora - 26
https://copr.fedorainfracloud.org/coprs/minio/minio/repo/fedora-26/minio-minio-fedora-26.repo

Builds are enabled for both i386 and x86_64.

Fixes #3576
2017-03-05 13:09:31 -08:00
Krishnan Parthasarathi
e3fd4c0dd6 XL: Make listOnlineDisks and outDatedDisks consistent w/ each other. (#3808) 2017-03-04 14:53:28 -08:00
Harshavardhana
b05c1c11d4 browser: Update UI assets with new changes. 2017-03-03 18:05:02 -08:00
Krishna Srinivas
0bae3330e8 browser: Send correct arguments for RemoveObjects web handler. (#3848)
Fixes #3839
2017-03-03 17:07:17 -08:00
Harshavardhana
05e53f1b34 api: CopyObjectPart was copying wrong offsets due to shadowing. (#3838)
startOffset was re-assigned to '0', so it would end up
copying the wrong content, ignoring the requested startOffset.

This also fixes the corruption issue we observed while
using docker registry.

Fixes https://github.com/docker/distribution/issues/2205

Also fixes #3842 - incorrect routing.
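The shadowing trap behind this class of bug, distilled into a runnable snippet (not the actual handler code); it is the kind of mistake vet's shadow check exists to catch:

```
package main

import "fmt"

func main() {
	var startOffset int64 // zero value: 0
	requested := true
	if requested {
		startOffset := int64(42) // BUG: ':=' shadows the outer variable
		_ = startOffset
	}
	fmt.Println("copying from offset", startOffset) // prints 0, not 42
}
```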
2017-03-03 16:32:04 -08:00
Anis Elleuch
0c8c463a63 tests: Fix web handlers testing with faulty disks (#3845) 2017-03-03 15:46:28 -08:00
Aditya Manthramurthy
6df7bc42b8 Fix check for bucket name: (#3832)
* Do not allow bucket names with adjacent hyphens and periods.
* Improve performance by eliminating the use of regular expressions.
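A regexp-free check in this spirit might look like the following sketch (rules simplified relative to the real validator):

```
package main

import "fmt"

// isValidBucketName: lowercase letters, digits, '.' and '-', 3-63 chars,
// no leading/trailing separators, no adjacent separator pairs.
func isValidBucketName(name string) bool {
	if len(name) < 3 || len(name) > 63 {
		return false
	}
	prev := byte(0)
	for i := 0; i < len(name); i++ {
		c := name[i]
		switch {
		case c >= 'a' && c <= 'z', c >= '0' && c <= '9':
		case c == '.' || c == '-':
			// Reject "..", ".-", "-." and leading/trailing separators.
			if i == 0 || i == len(name)-1 || prev == '.' || prev == '-' {
				return false
			}
		default:
			return false
		}
		prev = c
	}
	return true
}

func main() {
	fmt.Println(isValidBucketName("my-bucket"))  // true
	fmt.Println(isValidBucketName("my.-bucket")) // false: adjacent '.-'
}
```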
2017-03-03 10:23:41 -08:00
Anis Elleuch
6c00a57a7c quick: Add yaml format support (#3833)
quick's Save() and Load() now infer the config file's format
from the file name extension.
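A minimal sketch of extension-based format inference (illustrative; the real logic is in pkg/quick):

```
package main

import (
	"fmt"
	"path/filepath"
)

// formatOf maps a config file name to its serialization format.
func formatOf(filename string) (string, error) {
	switch filepath.Ext(filename) {
	case ".json":
		return "json", nil
	case ".yaml", ".yml":
		return "yaml", nil
	default:
		return "", fmt.Errorf("config file %q: unrecognized extension", filename)
	}
}

func main() {
	fmt.Println(formatOf("config.json")) // json <nil>
	fmt.Println(formatOf("config.yml"))  // yaml <nil>
}
```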
2017-03-03 10:22:09 -08:00
Harshavardhana
bc52d911ef api: Increase the maximum object size limit from 5GiB to 16GiB. (#3834)
The globalMaxObjectSize limit exists in the S3 spec perhaps
due to certain limitations of the S3 infrastructure. Minio
doesn't have such limitations and can stream a larger file
instead.

So we are bumping this limit to 16GiB.

Fixes #3825
2017-03-03 10:14:17 -08:00
Anis Elleuch
28c53a3555 obj: Make checkBucketExist() returns all errors (#3843)
This function was returning BucketNotFound for all errors,
which hides the fact that disks could be corrupted. This
commit fixes the behavior by returning all errors, which
are, in any case, Object API errors.
2017-03-03 10:12:43 -08:00
Harshavardhana
e5d4e7aa9d web: Validate if bucket names are reserved (#3841)
Neither '.minio.sys' nor 'minio' should ever be allowed to
be created from the web UI; they are also filtered out of
bucket listings.

Fixes #3840
2017-03-03 03:01:42 -08:00
Anis Elleuch
cddc684559 admin: Set Config returns errSet and errMsg (#3822)
There was no way to see whether a node encountered an error
when trying to set a new config; this commit adds a bool
errSet field.
2017-03-03 02:53:48 -08:00
Zejun Li
32d0d3d4ac Enhanced newObjectLayerFn (#3837) 2017-03-03 01:07:45 -08:00
Bala FA
98d17d2a97 Remove globalQuiet and globalConfigDir global variables (#3830) 2017-03-02 14:21:30 -08:00
Bala FA
208dd15245 Remove globalMaxCacheSize and globalCacheExpiry variables (#3826)
This patch does the following:

* Remove the global variables globalMaxCacheSize and globalCacheExpiry.
* Make them constants in the objcache package.
2017-03-02 10:34:37 -08:00
Anis Elleuch
a179fc9658 quick: Simplify Load() and CheckVersion() (#3831) 2017-03-02 10:29:06 -08:00
Zejun Li
d1afd16955 Using RWMutex to guard closing and listeners (#3829) 2017-03-02 10:00:22 -08:00
Bala FA
2348ae7a19 Make default values as constants (#3828) 2017-03-02 04:58:39 -08:00
Bala FA
480ea826dc Move rlimit functions into sys package. (#3824)
This patch addresses the following:

* go build works for the BSD family
* probe total RAM size on the BSD family
* make functions unit-testable
2017-03-01 21:51:57 -08:00
Aditya Manthramurthy
09e9fd745c Close client connection after checking for release update (#3820) 2017-03-01 09:18:55 -08:00
Anis Elleuch
77c1998a38 config: Fix creating new config with wrong version (#3821)
Simplify the config code a little to avoid making the same
mistake next time.
2017-03-01 09:17:04 -08:00
Krishna Srinivas
91cf54f895 web-handlers: Support removal of multiple objects at once. (#3810) 2017-02-28 19:07:28 -08:00
Karthic Rao
2b0ed21f08 tests: Fix test server init - cleanup (#3806) 2017-02-28 18:05:52 -08:00
Harshavardhana
472fa4a6ca api: Multi object delete should be protected. (#3814)
Add missing protection when deleting multiple objects
in parallel. Currently we delete objects without
proper locking through this API.

This can cause a significant number of races.
2017-02-28 18:00:24 -08:00
Bala FA
097cec676a fix: Set globalMaxCacheSize to allowable value. (#3816)
If the memory resource limit and total RAM are more than 8GiB,
globalMaxCacheSize is set to 50% of either the memory resource
limit or the total RAM.
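A sketch of that sizing rule, assuming the 8GiB threshold above (names are illustrative):

```
package main

import "fmt"

const minRAMForCache = 8 << 30 // 8 GiB

// maxCacheSize caps the object cache at half of the smaller of the
// memory resource limit and total RAM, and disables caching below
// the 8 GiB threshold. Illustrative only.
func maxCacheSize(memLimit, totalRAM uint64) uint64 {
	usable := totalRAM
	if memLimit < usable {
		usable = memLimit
	}
	if usable < minRAMForCache {
		return 0 // too little memory: caching disabled
	}
	return usable / 2
}

func main() {
	fmt.Println(maxCacheSize(32<<30, 16<<30)) // 8589934592 (8 GiB)
	fmt.Println(maxCacheSize(32<<30, 4<<30))  // 0
}
```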
2017-02-28 16:51:52 -08:00
Rushan
fcad4a44fd Browser: Remove duplicate object entries while sorting (#3813) 2017-02-28 13:11:34 -08:00
Anis Elleuch
9b3c014bab config: Add browser parameter (#3807)
The new browser parameter accepts "on" or "off", similar
to the MINIO_BROWSER environment variable.
2017-02-27 14:59:53 -08:00
Krishnan Parthasarathi
c9619673fb Implement SetConfig admin API handler. (#3792) 2017-02-27 11:40:27 -08:00
Anis Elleuch
dce0345f8f Set disk to nil after write which needs quorum (#3795)
Ignore a disk that wasn't able to successfully perform an
action, to avoid eventual perturbations when the disk comes
back in the middle of a write.
2017-02-26 11:58:32 -08:00
Anis Elleuch
461b2bbd37 admin: Move SetCredentials from Service to Generic (#3805)
Setting credentials no longer belongs in the service
management API.
2017-02-25 11:06:08 -08:00
Bala FA
69777b654e event: use common initialization logic (#3798)
Previously, the code to create and add targets was repeated
for each notification type. This patch fixes that.
2017-02-24 18:27:52 -08:00
Harshavardhana
70d2cb5f4d rpc: Remove time check for each RPC calls. (#3804)
This removal avoids a redundant requirement that was
adding problems on production setups.

Here is the list of time checks as they happened:

 - Fresh connect (during server startup) - CORRECT
 - A reconnect after a network disconnect - CORRECT
 - For each RPC call - INCORRECT.

Verifying time for each RPC aggravates a situation
where an RPC call is rejected under load on a
production setup: a 3-second window might not be
enough for the call to be initiated and received
by the server.
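For reference, the retained connect-time check amounts to something like this sketch (window size per the message above; names are illustrative):

```
package main

import (
	"errors"
	"fmt"
	"time"
)

const rpcSkewWindow = 3 * time.Second // illustrative window

// validateRequestTime is the kind of check kept for connect and
// reconnect but dropped from the per-call path: under load, a call can
// easily take longer than the window between being issued and served.
func validateRequestTime(requestTime time.Time) error {
	if d := time.Since(requestTime); d < -rpcSkewWindow || d > rpcSkewWindow {
		return errors.New("client/server clock skew or delay too large")
	}
	return nil
}

func main() {
	fmt.Println(validateRequestTime(time.Now()))                       // <nil>
	fmt.Println(validateRequestTime(time.Now().Add(-5 * time.Second))) // error
}
```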
2017-02-24 18:26:56 -08:00
Harshavardhana
cff45db1b9 cli: Use ADDRESS:PORT to clarify --address behavior (#3803)
Currently we document this as IP:PORT, which doesn't convey
whether HOSTNAME:PORT can be used. This change clarifies it
by calling it ADDRESS:PORT, which encompasses both a
HOSTNAME and an IP.

Fixes #3799
2017-02-24 14:19:20 -08:00
Harshavardhana
bcc5b6e1ef xl: Rename getOrderedDisks as shuffleDisks appropriately. (#3796)
This PR is a readability cleanup:

- rename getOrderedDisks to shuffleDisks
- rename getOrderedPartsMetadata to shufflePartsMetadata

Distribution is now the second argument instead of the
primary input argument, for brevity.

Also replace the type-cast `int64(0)` idiom with direct
type declarations, i.e. `var variable int64`, everywhere.
2017-02-24 09:20:40 -08:00
Harshavardhana
25b5a0534f browser: Update ui-assets and fix the copyright header. (#3790) 2017-02-22 17:27:26 -08:00
Rushan
52d6678bf0 Browser: Implement multi select user interface for object listings (#3730) 2017-02-22 14:51:38 -08:00
Nitish Tiwari
d8950ba7c5 Added server times note and fix Notes rendering for Doctor. (#3787) 2017-02-22 02:12:03 -08:00
Harshavardhana
cc28765025 xl/multipart: Make sure to delete temp renamed object. (#3785)
Existing objects are renamed to a temp location before
being overwritten in completeMultipart. We make sure
to delete the temp object even if subsequent calls fail.

Additionally, move the check that the parent dir is a
file earlier, so the entire operation fails up front.

Ref #3784
2017-02-21 19:43:44 -08:00
Harshavardhana
fe86319c56 ci: For windows builds stick to go1.7.5 (#3786) 2017-02-21 17:24:11 -08:00
Harshavardhana
99a12613a3 update: For source builds look for absolute path. (#3780)
os.Args[0] doesn't point to an absolute path; we need to
use exec.LookPath to find the absolute path before calling
os.Stat().
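A minimal sketch of that resolution order (illustrative helper, not the actual updater code):

```
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// binaryPath resolves the running binary: os.Args[0] may be a bare name
// like "minio", so resolve it through PATH before stat'ing it.
func binaryPath() (string, error) {
	path, err := exec.LookPath(os.Args[0])
	if err != nil {
		return "", err
	}
	if _, err := os.Stat(path); err != nil {
		return "", err
	}
	return path, nil
}

func main() {
	fmt.Println(binaryPath())
}
```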
2017-02-21 01:32:05 -08:00
Nitish Tiwari
097dd7418a Remove unused erasure diagram (#3783) 2017-02-20 21:01:08 -08:00
Nitish Tiwari
a7d3ea8c15 Update erasure code image (#3782) 2017-02-20 20:12:21 -08:00
Krishnan Parthasarathi
2745bf2f1f Implement ServerConfig admin REST API (#3741)
Returns a valid config.json for the setup. In the case of a
distributed setup, it checks whether a quorum or more of the
nodes have the same config.json.
2017-02-20 12:58:50 -08:00
Anis Elleuch
70d825c608 doc: Small rewrite of bucket events notif intro (#3775) 2017-02-20 12:07:27 -08:00
Harshavardhana
6b68c0170f For streaming signature do not save content-encoding in PutObject() (#3776)
Content-Encoding is set to "aws-chunked", which is an
S3-specific API value that has no meaning for an object.
This matches how S3 behaves for an object uploaded with a
streaming signature.
2017-02-20 12:07:03 -08:00
Aditya Manthramurthy
0a905e1a8a Fix rabbitmq reconnect problem (#3778) 2017-02-20 12:05:21 -08:00
Harshavardhana
9eb8e375c5 cli: Make sure to add --help flag for subcommands. (#3773)
--help is now back and prints properly with the command
help template.
2017-02-19 20:46:06 -08:00
Harshavardhana
7ea1de8245 copyObject: Be case sensitive for windows only server. (#3766)
For case-sensitive platforms we should honor case.

Fixes #3765

```
1) python s3cmd -c s3cfg_localminio put logo.png s3://testbucket/xyz/etc2/logo.PNG

2) python s3cmd -c s3cfg_localminio ls s3://testbucket/xyz/etc2/
2017-02-18 10:58     22059   s3://testbucket/xyz/etc2/logo.PNG

3) python s3cmd -c s3cfg_localminio cp s3://testbucket/xyz/etc2/logo.PNG s3://testbucket/xyz/etc2/logo.png
remote copy: 's3://testbucket/xyz/etc2/logo.PNG' -> 's3://testbucket/xyz/etc2/logo.png'

4) python s3cmd -c s3cfg_localminio ls s3://testbucket/xyz/etc2/
2017-02-18 10:58     22059   s3://testbucket/xyz/etc2/logo.PNG
2017-02-18 11:10     22059   s3://testbucket/xyz/etc2/logo.png
```
2017-02-18 13:41:59 -08:00
Anis Elleuch
54a18592e9 flags: Fix --version output (#3772) 2017-02-18 13:41:33 -08:00
Anis Elleuch
7e84c7427d server-mux: Rewrite graceful shutdown mechanism (#3771)
The old code used waitgroup Add() and Wait() in different
goroutines, which can eventually lead to a race.
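The race class being fixed, distilled: wg.Add() must run in the spawning goroutine, before go, or wg.Wait() can return early. A correct-by-construction sketch:

```
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1) // Add in the spawning goroutine, never inside the worker
		go func(id int) {
			defer wg.Done()
			fmt.Println("conn", id, "drained")
		}(i)
	}
	wg.Wait() // guaranteed to observe all four Adds
}
```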
2017-02-18 13:28:54 -08:00
Bala FA
d12f3e06b1 config-old: Use interface to avoid code repetition. (#3769) 2017-02-18 10:45:37 -08:00
Harshavardhana
0137ff498a auth/rpc: Token can be concurrently edited protect it. (#3764)
Make sure we protect access to `authToken` in authClient.

Fixes #3761
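A sketch of mutex-protected token access in this spirit (field and method names are illustrative):

```
package main

import (
	"fmt"
	"sync"
)

// authClient guards token reads and writes with a mutex so concurrent
// RPC calls can't race on re-login.
type authClient struct {
	mu        sync.Mutex
	authToken string
}

func (c *authClient) setToken(t string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.authToken = t
}

func (c *authClient) token() string {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.authToken
}

func main() {
	c := &authClient{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c.setToken(fmt.Sprintf("token-%d", i)) // safe under -race
		}(i)
	}
	wg.Wait()
	fmt.Println(c.token())
}
```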
2017-02-18 03:15:42 -08:00
Harshavardhana
34d9a6b46a Make sure client initializes to proper lock RPC path. (#3763)
Fixes a regression introduced in previous commit.
2017-02-18 02:52:11 -08:00
Harshavardhana
50b4e54a75 fs: Do not return reservedBucket names in ListBuckets() (#3754)
Make sure to skip reserved bucket names in `ListBuckets()`;
the current code didn't skip them properly. Also generalize
this behavior for both XL and FS.
2017-02-16 14:52:14 -08:00
Harshavardhana
8816b08aae Fix the systemd config path to the new URL 2017-02-15 21:28:06 -08:00
Harshavardhana
271e3ecde5 Fix tests from cli changes 2017-02-15 18:05:55 -08:00
298 changed files with 25820 additions and 3222 deletions


@@ -1,6 +1,9 @@
go_import_path: github.com/minio/minio
sudo: required
services:
- docker
dist: trusty
language: go
@@ -13,12 +16,25 @@ env:
- ARCH=i686
script:
## Run all the tests
- make
- make test GOFLAGS="-timeout 20m -race -v"
- make test GOFLAGS="-timeout 15m -race -v"
- make coverage
# Refer https://blog.hypriot.com/post/setup-simple-ci-pipeline-for-arm-images/
# push image
- >
if [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$ARCH" == "x86_64" ]; then
docker run --rm --privileged multiarch/qemu-user-static:register --reset
docker build -t minio/minio:edge-armhf . -f Dockerfile.armhf
docker build -t minio/minio:edge-aarch64 . -f Dockerfile.aarch64
docker login -u="$DOCKER_USER" -p="$DOCKER_PASS"
docker push minio/minio:edge-armhf
docker push minio/minio:edge-aarch64
fi
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.7.4
- 1.7.5


@@ -1,16 +1,18 @@
FROM golang:1.7-alpine
FROM alpine:3.5
WORKDIR /go/src/app
ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
COPY . /go/src/app
WORKDIR /go/src/github.com/minio/
RUN \
apk add --no-cache git && \
go-wrapper download && \
go-wrapper install -ldflags "-X github.com/minio/minio/cmd.Version=2017-02-16T01:47:30Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-02-16T01-47-30Z -X github.com/minio/minio/cmd.CommitID=3d98311d9f4ceb78dba996dcdc0751253241e697" && \
mkdir -p /export/docker && \
rm -rf /go/pkg /go/src && \
apk del git
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps git go musl-dev && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "-X github.com/minio/minio/cmd.Version=2017-04-29T00:40:27Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-04-29T00-40-27Z -X github.com/minio/minio/cmd.CommitID=eb50175ad911d496bf583a599de89547f0c9be89" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
ENTRYPOINT ["minio"]

Dockerfile.aarch64 (new file)

@@ -0,0 +1,19 @@
FROM resin/aarch64-alpine:3.5
ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
WORKDIR /go/src/github.com/minio/
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps git go musl-dev && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "-X github.com/minio/minio/cmd.Version=2017-03-16T21:50:32Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-03-16T21-50-32Z -X github.com/minio/minio/cmd.CommitID=5311eb22fd681a8cd4a46e2a872d46c2352c64e8" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
ENTRYPOINT ["minio"]
VOLUME ["/export"]

Dockerfile.armhf (new file)

@@ -0,0 +1,19 @@
FROM resin/armhf-alpine:3.5
ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
WORKDIR /go/src/github.com/minio/
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps git go musl-dev && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "-X github.com/minio/minio/cmd.Version=2017-03-16T21:50:32Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-03-16T21-50-32Z -X github.com/minio/minio/cmd.CommitID=5311eb22fd681a8cd4a46e2a872d46c2352c64e8" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
ENTRYPOINT ["minio"]
VOLUME ["/export"]


@@ -1,7 +1,7 @@
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
BUILD_LDFLAGS := '$(LDFLAGS)'
BUILD_LDFLAGS := '$(LDFLAGS) -s -w'
TAG := latest
HOST ?= $(shell uname)
@@ -71,10 +71,8 @@ verifiers: vet fmt lint cyclo spelling
vet:
@echo "Running $@:"
@go tool vet -all ./cmd
@go tool vet -all ./pkg
@go tool vet -shadow=true ./cmd
@go tool vet -shadow=true ./pkg
@go vet github.com/minio/minio/cmd/...
@go vet github.com/minio/minio/pkg/...
fmt:
@echo "Running $@:"


@@ -18,19 +18,19 @@ docker run -p 9000:9000 minio/minio:edge server /export
```
Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
## OS X
## macOS
### Homebrew
Install minio packages using [Homebrew](http://brew.sh/)
```sh
brew install minio
brew install minio/stable/minio
minio server ~/Photos
```
### Binary Download
| Platform| Architecture | URL|
| ----------| -------- | ------|
|Apple OS X|64-bit Intel|https://dl.minio.io/server/minio/release/darwin-amd64/minio|
|Apple macOS|64-bit Intel|https://dl.minio.io/server/minio/release/darwin-amd64/minio |
```sh
chmod 755 minio
./minio server ~/Photos


@@ -11,11 +11,12 @@ clone_folder: c:\gopath\src\github.com\minio\minio
# Environment variables
environment:
GOROOT: c:\go17
GOPATH: c:\gopath
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- set PATH=%GOPATH%\bin;c:\go17\bin;%PATH%
- go version
- go env
- python --version
@@ -35,9 +36,9 @@ test_script:
# Unit tests
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- mkdir build\coverage
- go test -timeout 20m -v -race github.com/minio/minio/cmd...
- go test -v -race github.com/minio/minio/pkg...
- go test -timeout 15m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
- go test -timeout 17m -race github.com/minio/minio/cmd...
- go test -race github.com/minio/minio/pkg...
- go test -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
after_test:



@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,13 +14,9 @@
* limitations under the License.
*/
import url from 'url'
import Moment from 'moment'
import browserHistory from 'react-router/lib/browserHistory'
import web from './web'
import * as utils from './utils'
import storage from 'local-storage-fallback'
import { minioBrowserPrefix } from './constants'
export const SET_WEB = 'SET_WEB'
@@ -28,7 +24,6 @@ export const SET_CURRENT_BUCKET = 'SET_CURRENT_BUCKET'
export const SET_CURRENT_PATH = 'SET_CURRENT_PATH'
export const SET_BUCKETS = 'SET_BUCKETS'
export const ADD_BUCKET = 'ADD_BUCKET'
export const ADD_OBJECT = 'ADD_OBJECT'
export const SET_VISIBLE_BUCKETS = 'SET_VISIBLE_BUCKETS'
export const SET_OBJECTS = 'SET_OBJECTS'
export const SET_STORAGE_INFO = 'SET_STORAGE_INFO'
@@ -57,6 +52,9 @@ export const SET_SHARE_OBJECT = 'SET_SHARE_OBJECT'
export const DELETE_CONFIRMATION = 'DELETE_CONFIRMATION'
export const SET_PREFIX_WRITABLE = 'SET_PREFIX_WRITABLE'
export const REMOVE_OBJECT = 'REMOVE_OBJECT'
export const CHECKED_OBJECTS_ADD = 'CHECKED_OBJECTS_ADD'
export const CHECKED_OBJECTS_REMOVE = 'CHECKED_OBJECTS_REMOVE'
export const CHECKED_OBJECTS_RESET = 'CHECKED_OBJECTS_RESET'
export const showDeleteConfirmation = (object) => {
return {
@@ -78,11 +76,12 @@ export const hideDeleteConfirmation = () => {
}
}
export const showShareObject = url => {
export const showShareObject = (object, url) => {
return {
type: SET_SHARE_OBJECT,
shareObject: {
url: url,
object,
url,
show: true
}
}
@@ -98,15 +97,17 @@ export const hideShareObject = () => {
}
}
export const shareObject = (object, expiry) => (dispatch, getState) => {
export const shareObject = (object, days, hours, minutes) => (dispatch, getState) => {
const {currentBucket, web} = getState()
let host = location.host
let bucket = currentBucket
if (!web.LoggedIn()) {
dispatch(showShareObject(`${host}/${bucket}/${object}`))
dispatch(showShareObject(object, `${host}/${bucket}/${object}`))
return
}
let expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
web.PresignedGet({
host,
bucket,
@@ -114,7 +115,11 @@ export const shareObject = (object, expiry) => (dispatch, getState) => {
expiry
})
.then(obj => {
dispatch(showShareObject(obj.url))
dispatch(showShareObject(object, obj.url))
dispatch(showAlert({
type: 'success',
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes.`
}))
})
.catch(err => {
dispatch(showAlert({
@@ -304,13 +309,13 @@ export const listObjects = () => {
marker: marker
})
.then(res => {
let objects = res.objects
let objects = res.objects
if (!objects)
objects = []
objects = objects.map(object => {
object.name = object.name.replace(`${currentPath}`, '');
return object
})
object.name = object.name.replace(`${currentPath}`, '');
return object
})
dispatch(setObjects(objects, res.nextmarker, res.istruncated))
dispatch(setPrefixWritable(res.writable))
dispatch(setLoadBucket(''))
@@ -344,9 +349,9 @@ export const selectPrefix = prefix => {
if (!objects)
objects = []
objects = objects.map(object => {
object.name = object.name.replace(`${prefix}`, '');
return object
})
object.name = object.name.replace(`${prefix}`, '');
return object
})
dispatch(setObjects(
objects,
res.nextmarker,
@@ -410,6 +415,25 @@ export const setLoginError = () => {
}
}
export const downloadSelected = (url, req, xhr) => {
return (dispatch) => {
xhr.open('POST', url, true)
xhr.responseType = 'blob'
xhr.onload = function(e) {
if (this.status == 200) {
dispatch(checkedObjectsReset())
var blob = new Blob([this.response], {
type: 'application/zip'
})
var blobUrl = window.URL.createObjectURL(blob);
window.location = blobUrl
}
};
xhr.send(JSON.stringify(req));
}
}
export const uploadFile = (file, xhr) => {
return (dispatch, getState) => {
const {currentBucket, currentPath} = getState()
@@ -563,3 +587,24 @@ export const setPolicies = (policies) => {
policies
}
}
export const checkedObjectsAdd = (objectName) => {
return {
type: CHECKED_OBJECTS_ADD,
objectName
}
}
export const checkedObjectsRemove = (objectName) => {
return {
type: CHECKED_OBJECTS_REMOVE,
objectName
}
}
export const checkedObjectsReset = (objectName) => {
return {
type: CHECKED_OBJECTS_RESET,
objectName
}
}


@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,7 +27,6 @@ import OverlayTrigger from 'react-bootstrap/lib/OverlayTrigger'
import Tooltip from 'react-bootstrap/lib/Tooltip'
import Dropdown from 'react-bootstrap/lib/Dropdown'
import MenuItem from 'react-bootstrap/lib/MenuItem'
import InputGroup from '../components/InputGroup'
import Dropzone from '../components/Dropzone'
import ObjectsList from '../components/ObjectsList'
@@ -227,14 +226,24 @@ export default class Browse extends React.Component {
}
removeObject() {
const {web, dispatch, currentPath, currentBucket, deleteConfirmation} = this.props
const {web, dispatch, currentPath, currentBucket, deleteConfirmation, checkedObjects} = this.props
let objects = checkedObjects.length > 0 ? checkedObjects : [deleteConfirmation.object]
web.RemoveObject({
bucketName: currentBucket,
objectName: deleteConfirmation.object
bucketname: currentBucket,
objects: objects
})
.then(() => {
this.hideDeleteConfirmation()
dispatch(actions.removeObject(deleteConfirmation.object))
if (checkedObjects.length > 0) {
for (let i = 0; i < checkedObjects.length; i++) {
dispatch(actions.removeObject(checkedObjects[i].replace(currentPath, '')))
}
dispatch(actions.checkedObjectsReset())
} else {
let delObject = deleteConfirmation.object.replace(currentPath, '')
dispatch(actions.removeObject(delObject))
}
})
.catch(e => dispatch(actions.showAlert({
type: 'danger',
@@ -262,7 +271,8 @@ export default class Browse extends React.Component {
shareObject(e, object) {
e.preventDefault()
const {dispatch} = this.props
dispatch(actions.shareObject(object))
// let expiry = 5 * 24 * 60 * 60 // 5 days expiry by default
dispatch(actions.shareObject(object, 5, 0, 0))
}
hideShareObjectModal() {
@@ -354,18 +364,46 @@ export default class Browse extends React.Component {
this.refs.copyTextInput.select()
}
handleExpireValue(targetInput, inc) {
handleExpireValue(targetInput, inc, object) {
inc === -1 ? this.refs[targetInput].stepDown(1) : this.refs[targetInput].stepUp(1)
if (this.refs.expireDays.value == 7) {
this.refs.expireHours.value = 0
this.refs.expireMins.value = 0
}
if (this.refs.expireDays.value + this.refs.expireHours.value + this.refs.expireMins.value == 0) {
this.refs.expireDays.value = 7
}
const {dispatch} = this.props
dispatch(actions.shareObject(object, this.refs.expireDays.value, this.refs.expireHours.value, this.refs.expireMins.value))
}
checkObject(e, objectName) {
const {dispatch} = this.props
e.target.checked ? dispatch(actions.checkedObjectsAdd(objectName)) : dispatch(actions.checkedObjectsRemove(objectName))
}
downloadSelected() {
const {dispatch} = this.props
let req = {
bucketName: this.props.currentBucket,
objects: this.props.checkedObjects,
prefix: this.props.currentPath
}
let requestUrl = location.origin + "/minio/zip?token=" + localStorage.token
this.xhr = new XMLHttpRequest()
dispatch(actions.downloadSelected(requestUrl, req, this.xhr))
}
clearSelected() {
const {dispatch} = this.props
dispatch(actions.checkedObjectsReset())
}
render() {
const {total, free} = this.props.storageInfo
const {showMakeBucketModal, alert, sortNameOrder, sortSizeOrder, sortDateOrder, showAbout, showBucketPolicy} = this.props
const {showMakeBucketModal, alert, sortNameOrder, sortSizeOrder, sortDateOrder, showAbout, showBucketPolicy, checkedObjects} = this.props
const {version, memory, platform, runtime} = this.props.serverInfo
const {sidebarStatus} = this.props
const {showSettings} = this.props
@@ -435,7 +473,6 @@ export default class Browse extends React.Component {
</li>
</ul>
</div>
}
let createButton = ''
@@ -490,6 +527,14 @@ export default class Browse extends React.Component {
clickOutside={ this.hideSidebar.bind(this) }
showPolicy={ this.showBucketPolicy.bind(this) } />
<div className="fe-body">
<div className={ 'list-actions' + (classNames({
' list-actions-toggled': checkedObjects.length > 0
})) }>
<span className="la-label"><i className="fa fa-check-circle" /> { checkedObjects.length } Objects selected</span>
<span className="la-actions pull-right"><button onClick={ this.downloadSelected.bind(this) }> Download all as zip </button></span>
<span className="la-actions pull-right"><button onClick={ this.showDeleteConfirmation.bind(this) }> Delete selected </button></span>
<i className="la-close fa fa-times" onClick={ this.clearSelected.bind(this) }></i>
</div>
<Dropzone>
{ alertBox }
<header className="fe-header-mobile hidden-lg hidden-md">
@@ -515,7 +560,8 @@ export default class Browse extends React.Component {
</header>
<div className="feb-container">
<header className="fesl-row" data-type="folder">
<div className="fesl-item fi-name" onClick={ this.sortObjectsByName.bind(this) } data-sort="name">
<div className="fesl-item fesl-item-icon"></div>
<div className="fesl-item fesl-item-name" onClick={ this.sortObjectsByName.bind(this) } data-sort="name">
Name
<i className={ classNames({
'fesli-sort': true,
@@ -524,7 +570,7 @@ export default class Browse extends React.Component {
'fa-sort-alpha-asc': !sortNameOrder
}) } />
</div>
<div className="fesl-item fi-size" onClick={ this.sortObjectsBySize.bind(this) } data-sort="size">
<div className="fesl-item fesl-item-size" onClick={ this.sortObjectsBySize.bind(this) } data-sort="size">
Size
<i className={ classNames({
'fesli-sort': true,
@@ -533,7 +579,7 @@ export default class Browse extends React.Component {
'fa-sort-amount-asc': !sortSizeOrder
}) } />
</div>
<div className="fesl-item fi-modified" onClick={ this.sortObjectsByDate.bind(this) } data-sort="last-modified">
<div className="fesl-item fesl-item-modified" onClick={ this.sortObjectsByDate.bind(this) } data-sort="last-modified">
Last Modified
<i className={ classNames({
'fesli-sort': true,
@@ -542,7 +588,7 @@ export default class Browse extends React.Component {
'fa-sort-numeric-asc': !sortDateOrder
}) } />
</div>
<div className="fesl-item fi-actions"></div>
<div className="fesl-item fesl-item-actions"></div>
</header>
</div>
<div className="feb-container">
@@ -553,9 +599,11 @@ export default class Browse extends React.Component {
<ObjectsList dataType={ this.dataType.bind(this) }
selectPrefix={ this.selectPrefix.bind(this) }
showDeleteConfirmation={ this.showDeleteConfirmation.bind(this) }
shareObject={ this.shareObject.bind(this) } />
shareObject={ this.shareObject.bind(this) }
checkObject={ this.checkObject.bind(this) }
checkedObjectsArray={ checkedObjects } />
</InfiniteScroll>
<div className="text-center" style={ { display: istruncated ? 'block' : 'none' } }>
<div className="text-center" style={ { display: (istruncated && currentBucket) ? 'block' : 'none' } }>
<span>Loading...</span>
</div>
</div>
@@ -673,7 +721,7 @@ export default class Browse extends React.Component {
</label>
<div className="set-expire">
<div className="set-expire-item">
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireDays', 1) }></i>
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireDays', 1, shareObject.object) }></i>
<div className="set-expire-title">
Days
</div>
@@ -682,12 +730,12 @@ export default class Browse extends React.Component {
type="number"
min={ 0 }
max={ 7 }
defaultValue={ 0 } />
defaultValue={ 5 } />
</div>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireDays', -1) }></i>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireDays', -1, shareObject.object) }></i>
</div>
<div className="set-expire-item">
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireHours', 1) }></i>
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireHours', 1, shareObject.object) }></i>
<div className="set-expire-title">
Hours
</div>
@@ -695,30 +743,30 @@ export default class Browse extends React.Component {
<input ref="expireHours"
type="number"
min={ 0 }
max={ 24 }
max={ 23 }
defaultValue={ 0 } />
</div>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireHours', -1) }></i>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireHours', -1, shareObject.object) }></i>
</div>
<div className="set-expire-item">
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireMins', 1) }></i>
<i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireMins', 1, shareObject.object) }></i>
<div className="set-expire-title">
Minutes
</div>
<div className="set-expire-value">
<input ref="expireMins"
type="number"
min={ 1 }
max={ 60 }
defaultValue={ 45 } />
min={ 0 }
max={ 59 }
defaultValue={ 0 } />
</div>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireMins', -1) }></i>
<i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireMins', -1, shareObject.object) }></i>
</div>
</div>
</div>
</div>
</ModalBody>
<div className="modal-footer">
<CopyToClipboard text={ shareObject.url } onCopy={ this.showMessage.bind(this) }>
<CopyToClipboard text={ window.location.protocol + '//' + shareObject.url } onCopy={ this.showMessage.bind(this) }>
<button className="btn btn-success">
Copy Link
</button>


@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016, 2017 Minio, Inc.
* Minio Cloud Storage (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ let BrowserDropdown = ({fullScreenFunc, aboutFunc, settingsFunc, logoutFunc}) =>
</Dropdown.Toggle>
<Dropdown.Menu className="dropdown-menu-right">
<li>
<a target="_blank" href="https://github.com/minio/miniobrowser">Github <i className="fa fa-github"></i></a>
<a target="_blank" href="https://github.com/minio/minio">Github <i className="fa fa-github"></i></a>
</li>
<li>
<a href="" onClick={ fullScreenFunc }>Fullscreen <i className="fa fa-expand"></i></a>



@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,11 +39,12 @@ export default class Dropzone extends React.Component {
// won't handle child elements correctly.
const style = {
height: '100%',
borderWidth: '2px',
borderWidth: '0',
borderStyle: 'dashed',
borderColor: '#fff'
}
const activeStyle = {
borderWidth: '2px',
borderColor: '#777'
}
const rejectStyle = {



@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,8 +20,7 @@ import humanize from 'humanize'
import connect from 'react-redux/lib/components/connect'
import Dropdown from 'react-bootstrap/lib/Dropdown'
let ObjectsList = ({objects, currentPath, selectPrefix, dataType, showDeleteConfirmation, shareObject, loadPath}) => {
let ObjectsList = ({objects, currentPath, selectPrefix, dataType, showDeleteConfirmation, shareObject, loadPath, checkObject, checkedObjectsArray}) => {
const list = objects.map((object, i) => {
let size = object.name.endsWith('/') ? '-' : humanize.filesize(object.size)
let lastModified = object.name.endsWith('/') ? '-' : Moment(object.lastModified).format('lll')
@@ -30,29 +29,51 @@ let ObjectsList = ({objects, currentPath, selectPrefix, dataType, showDeleteConf
let deleteButton = ''
if (web.LoggedIn())
deleteButton = <a href="" className="fiad-action" onClick={ (e) => showDeleteConfirmation(e, `${currentPath}${object.name}`) }><i className="fa fa-trash"></i></a>
if (!object.name.endsWith('/')) {
actionButtons = <Dropdown id="fia-dropdown">
<Dropdown.Toggle noCaret className="fia-toggle"></Dropdown.Toggle>
<Dropdown.Menu>
<a href="" className="fiad-action" onClick={ (e) => shareObject(e, `${currentPath}${object.name}`) }><i className="fa fa-copy"></i></a>
{ deleteButton }
</Dropdown.Menu>
</Dropdown>
if (!checkedObjectsArray.length > 0) {
if (!object.name.endsWith('/')) {
actionButtons = <Dropdown id={ "fia-dropdown-" + object.name.replace('.', '-') }>
<Dropdown.Toggle noCaret className="fia-toggle"></Dropdown.Toggle>
<Dropdown.Menu>
<a href="" className="fiad-action" onClick={ (e) => shareObject(e, `${currentPath}${object.name}`) }><i className="fa fa-copy"></i></a>
{ deleteButton }
</Dropdown.Menu>
</Dropdown>
}
}
let activeClass = ''
let isChecked = ''
if (checkedObjectsArray.indexOf(object.name) > -1) {
activeClass = ' fesl-row-selected'
isChecked = true
}
return (
<div key={ i } className={ "fesl-row " + loadingClass } data-type={ dataType(object.name, object.contentType) }>
<div className="fesl-item fi-name">
<div key={ i } className={ "fesl-row " + loadingClass + activeClass } data-type={ dataType(object.name, object.contentType) }>
<div className="fesl-item fesl-item-icon">
<div className="fi-select">
<input type="checkbox"
name={ object.name }
checked={ isChecked }
onChange={ (e) => checkObject(e, object.name) } />
<i className="fis-icon"></i>
<i className="fis-helper"></i>
</div>
</div>
<div className="fesl-item fesl-item-name">
<a href="" onClick={ (e) => selectPrefix(e, `${currentPath}${object.name}`) }>
{ object.name }
</a>
</div>
<div className="fesl-item fi-size">
<div className="fesl-item fesl-item-size">
{ size }
</div>
<div className="fesl-item fi-modified">
<div className="fesl-item fesl-item-modified">
{ lastModified }
</div>
<div className="fesl-item fi-actions">
<div className="fesl-item fesl-item-actions">
{ actionButtons }
</div>
</div>
@@ -72,4 +93,4 @@ export default connect(state => {
currentPath: state.currentPath,
loadPath: state.loadPath
}
})(ObjectsList)
})(ObjectsList)



@@ -77,4 +77,4 @@ class Policy extends Component {
}
}
export default connect(state => state)(Policy)
export default connect(state => state)(Policy)


@@ -80,4 +80,4 @@ class PolicyInput extends Component {
}
}
export default connect(state => state)(PolicyInput)
export default connect(state => state)(PolicyInput)



@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -54,9 +54,10 @@ export default (state = {
shareObject: {
show: false,
url: '',
expiry: 604800
object: ''
},
prefixWritable: false
prefixWritable: false,
checkedObjects: []
}, action) => {
let newState = Object.assign({}, state)
switch (action.type) {
@@ -82,7 +83,7 @@ export default (state = {
newState.marker = ""
newState.istruncated = action.istruncated
} else {
newState.objects = [...newState.objects, ...action.objects]
newState.objects = [...action.objects]
newState.marker = action.marker
newState.istruncated = action.istruncated
}
@@ -185,6 +186,19 @@ export default (state = {
if (idx == -1) break
newState.objects = [...newState.objects.slice(0, idx), ...newState.objects.slice(idx + 1)]
break
case actions.CHECKED_OBJECTS_ADD:
newState.checkedObjects = [...newState.checkedObjects, action.objectName]
break
case actions.CHECKED_OBJECTS_REMOVE:
let index = newState.checkedObjects.indexOf(action.objectName)
if (index == -1) break
newState.checkedObjects = [...newState.checkedObjects.slice(0, index), ...newState.checkedObjects.slice(index + 1)]
break
case actions.CHECKED_OBJECTS_RESET:
newState.checkedObjects = []
break
}
return newState
}



@@ -35,6 +35,10 @@
width: 100%;
}
.btn-white {
.btn-variant(#fff, darken(@text-color, 20%));
}
.btn-link {
.btn-variant(#eee, #545454);
}


@@ -2,14 +2,13 @@
Header
----------------------------*/
.fe-header {
padding: 45px 55px 20px;
@media(min-width: @screen-md-min) {
@media(min-width: (@screen-sm-min - 100)) {
position: relative;
padding: 40px 40px 20px 45px;
}
@media(max-width: (@screen-xs-max - 100)) {
padding: 25px 25px 20px;
padding: 20px;
}
h2 {
@@ -239,4 +238,3 @@
}


@@ -2,17 +2,19 @@
Row
----------------------------*/
.fesl-row {
padding-right: 40px;
padding-top: 5px;
padding-bottom: 5px;
position: relative;
@media (min-width: (@screen-sm-min - 100px)) {
padding: 5px 20px 5px 40px;
display: flex;
flex-flow: row nowrap;
justify-content: space-between;
}
@media(max-width: (@screen-xs-max - 100px)) {
padding: 5px 20px;
}
.clearfix();
}
@@ -20,7 +22,7 @@ header.fesl-row {
@media (min-width:(@screen-sm-min - 100px)) {
margin-bottom: 20px;
border-bottom: 1px solid lighten(@text-muted-color, 20%);
padding-left: 40px;
padding-left: 30px;
.fesl-item,
.fesli-sort {
@@ -42,7 +44,7 @@ header.fesl-row {
font-size: 14px;
}
&:hover:not(.fi-actions) {
&:hover:not(.fesl-item-actions) {
background: lighten(@text-muted-color, 22%);
color: @dark-gray;
@@ -58,54 +60,42 @@ header.fesl-row {
}
}
.list-type(@background, @icon) {
.fis-icon {
background-color: @background;
&:before {
content: @icon;
}
}
}
div.fesl-row {
padding-left: 85px;
border-bottom: 1px solid transparent;
cursor: default;
.transition(background-color);
.transition-duration(500ms);
@media (max-width: (@screen-xs-max - 100px)) {
padding-left: 70px;
padding-right: 45px;
padding: 5px 20px;
}
&:nth-child(even) {
background-color: #fafafa;
}
&:hover {
background-color: #fbf7dc;
}
&[data-type]:before {
font-family: @font-family-icon;
width: 35px;
height: 35px;
text-align: center;
line-height: 35px;
position: absolute;
border-radius: 50%;
font-size: 16px;
left: 50px;
top: 9px;
color: @white;
@media (max-width: (@screen-xs-max - 100px)) {
left: 20px;
&:not(.fesl-row-selected) {
&:nth-child(even) {
background-color: @list-row-even-bg;
}
}
&[data-type="folder"] {
@media (max-width: (@screen-xs-max - 100px)) {
.fesl-item {
&.fi-name {
padding-top: 10px;
padding-bottom: 7px;
}
&:hover {
.fis-icon {
&:before {
.opacity(0)
}
}
&.fi-size,
&.fi-modified {
display: none;
}
.fis-helper {
&:before {
.opacity(1);
}
}
}
@@ -113,54 +103,18 @@ div.fesl-row {
/*--------------------------
Icons
----------------------------*/
&[data-type=folder]:before {
content: '\f114';
background-color: #a1d6dd;
}
&[data-type=pdf]:before {
content: "\f1c1";
background-color: #fa7775;
}
&[data-type=zip]:before {
content: "\f1c6";
background-color: #427089;
}
&[data-type=audio]:before {
content: "\f1c7";
background-color: #009688
}
&[data-type=code]:before {
content: "\f1c9";
background-color: #997867;
}
&[data-type=excel]:before {
content: "\f1c3";
background-color: #64c866;
}
&[data-type=image]:before {
content: "\f1c5";
background-color: #f06292;
}
&[data-type=video]:before {
content: "\f1c8";
background-color: #f8c363;
}
&[data-type=other]:before {
content: "\f016";
background-color: #afafaf;
}
&[data-type=text]:before {
content: "\f0f6";
background-color: #8a8a8a;
}
&[data-type=doc]:before {
content: "\f1c2";
background-color: #2196f5;
}
&[data-type=presentation]:before {
content: "\f1c4";
background-color: #896ea6;
}
&[data-type=folder] { .list-type(#a1d6dd, '\f114'); }
&[data-type=pdf] {.list-type(#fa7775, '\f1c1'); }
&[data-type=zip] { .list-type(#427089, '\f1c6'); }
&[data-type=audio] { .list-type(#009688, '\f1c7'); }
&[data-type=code] { .list-type(#997867, "\f1c9"); }
&[data-type=excel] { .list-type(#f1c3, '\f1c3'); }
&[data-type=image] { .list-type(#f06292, '\f1c5'); }
&[data-type=video] { .list-type(#f8c363, '\f1c8'); }
&[data-type=other] { .list-type(#afafaf, '\f016'); }
&[data-type=text] { .list-type(#8a8a8a, '\f0f6'); }
&[data-type=doc] { .list-type(#2196f5, '\f1c2'); }
&[data-type=presentation] { .list-type(#896ea6, '\f1c4'); }
&.fesl-loading{
&:before {
@@ -180,6 +134,113 @@ div.fesl-row {
}
}
.fesl-row-selected {
background-color: @list-row-selected-bg;
&, .fesl-item a {
color: darken(@text-color, 10%);
}
}
.fi-select {
float: left;
position: relative;
width: 35px;
height: 35px;
margin: 3px 0;
@media(max-width: (@screen-xs-max - 100px)) {
margin-right: 15px;
}
input {
position: absolute;
left: 0;
top: 0;
width: 35px;
height: 35px;
z-index: 20;
opacity: 0;
cursor: pointer;
&:checked {
& ~ .fis-icon {
background-color: #32393F;
&:before {
opacity: 0;
}
}
& ~ .fis-helper {
&:before {
.scale(0);
}
&:after {
.scale(1);
}
}
}
}
}
.fis-icon {
display: inline-block;
vertical-align: top;
border-radius: 50%;
width: 35px;
height: 35px;
.transition(background-color);
.transition-duration(250ms);
&:before {
width: 100%;
height: 100%;
text-align: center;
position: absolute;
border-radius: 50%;
font-family: @font-family-icon;
line-height: 35px;
font-size: 16px;
color: @white;
.transition(all);
.transition-duration(300ms);
font-style: normal;
}
}
.fis-helper {
&:before,
&:after {
position: absolute;
.transition(all);
.transition-duration(250ms);
}
&:before {
content: '';
width: 15px;
height: 15px;
border: 2px solid @white;
z-index: 10;
border-radius: 2px;
top: 10px;
left: 10px;
opacity: 0;
}
&:after {
font-family: @font-family-icon;
content: '\f00c';
top: 8px;
left: 9px;
color: @white;
font-size: 14px;
.scale(0);
}
}
/*--------------------------
Files and Folders
@@ -192,26 +253,26 @@ div.fesl-row {
}
@media(min-width: (@screen-sm-min - 100px)) {
&:not(.fi-actions) {
&:not(.fesl-item-actions):not(.fesl-item-icon) {
text-overflow: ellipsis;
padding: 10px 15px;
white-space: nowrap;
overflow: hidden;
}
&.fi-name {
&.fesl-item-name {
flex: 3;
}
&.fi-size {
&.fesl-item-size {
width: 140px;
}
&.fi-modified {
&.fesl-item-modified {
width: 190px;
}
&.fi-actions {
&.fesl-item-actions {
width: 40px;
}
}
@@ -219,29 +280,29 @@ div.fesl-row {
@media(max-width: (@screen-xs-max - 100px)) {
padding: 0;
&.fi-name {
&.fesl-item-name {
width: 100%;
margin-bottom: 3px;
}
&.fi-size,
&.fi-modified {
&.fesl-item-size,
&.fesl-item-modified {
font-size: 12px;
color: #B5B5B5;
float: left;
}
&.fi-modified {
&.fesl-item-modified {
max-width: 72px;
white-space: nowrap;
overflow: hidden;
}
&.fi-size {
&.fesl-item-size {
margin-right: 10px;
}
&.fi-actions {
&.fesl-item-actions {
position: absolute;
top: 5px;
right: 10px;
@@ -266,7 +327,7 @@ div.fesl-row {
}
}
.fi-actions {
.fesl-item-actions {
.dropdown-menu {
background-color: transparent;
box-shadow: none;
@@ -324,6 +385,79 @@ div.fesl-row {
}
}
.list-actions {
position: fixed;
.translate3d(0, -100%, 0);
.opacity(0);
.transition(all);
.transition-duration(200ms);
padding: 20px 70px 20px 25px;
top: 0;
left: 0;
width: 100%;
background-color: @brand-primary;
z-index: 20;
box-shadow: 0 0 10px rgba(0,0,0,0.3);
text-align: center;
&.list-actions-toggled {
.translate3d(0, 0, 0);
.opacity(1);
}
}
.la-close {
position: absolute;
right: 20px;
top: 0;
color: #fff;
width: 30px;
height: 30px;
border-radius: 50%;
text-align: center;
line-height: 30px !important;
background: rgba(255, 255, 255, 0.1);
font-weight: normal;
bottom: 0;
margin: auto;
cursor: pointer;
&:hover {
background-color: rgba(255, 255, 255, 0.2);
}
}
.la-label {
color: @white;
float: left;
padding: 4px 0;
.fa {
font-size: 22px;
vertical-align: top;
margin-right: 10px;
margin-top: -1px;
}
}
.la-actions {
button {
background-color: transparent;
border: 2px solid rgba(255,255,255,0.9);
color: @white;
border-radius: 2px;
padding: 5px 10px;
font-size: 13px;
.transition(all);
.transition-duration(300ms);
margin-left: 10px;
&:hover {
background-color: @white;
color: @brand-primary;
}
}
}
@-webkit-keyframes fiad-action-anim {
from {


@@ -99,4 +99,18 @@
content: '7 days';
right: 0;
}
}
.modal-aheader {
height: 100px;
&:before {
height: 0 !important;
}
.modal-dialog {
margin: 0;
vertical-align: top;
}
}


@@ -49,4 +49,4 @@
z-index: 1;
-webkit-animation: zoomIn 250ms, spin 700ms 250ms infinite linear;
animation: zoomIn 250ms, spin 700ms 250ms infinite linear;
}
}


@@ -7,7 +7,7 @@
position: fixed;
height: 100%;
overflow: hidden;
padding: 35px;
padding: 25px;
@media(min-width: @screen-md-min) {
.translate3d(0, 0, 0);
@@ -63,15 +63,15 @@
height: ~"calc(100vh - 260px)";
overflow: auto;
padding: 0;
margin: 0 -35px;
margin: 0 -25px;
& li {
position: relative;
& > a {
display: block;
padding: 10px 40px 12px 65px;
.text-overflow();
padding: 10px 45px 12px 55px;
word-wrap: break-word;
&:before {
font-family: FontAwesome;
@@ -79,7 +79,7 @@
font-size: 17px;
position: absolute;
top: 10px;
left: 35px;
left: 25px;
.opacity(0.8);
}
@@ -95,7 +95,7 @@
}
&.active {
background-color: rgba(0, 0, 0, 0.2);
background-color: #282e32;
& > a {
color: @white;
@@ -139,10 +139,10 @@
position: absolute;
top: 0;
right: 0;
width: 40px;
width: 35px;
height: 100%;
cursor: pointer;
background: url(../../img/more-h-light.svg) no-repeat left;
background: url(../../img/more-h-light.svg) no-repeat top 20px left;
}
/* Scrollbar */


@@ -13,7 +13,7 @@
/*--------------------------
File Explorer
----------------------------*/
@fe-sidebar-width : 300px;
@fe-sidebar-width : 320px;
@text-muted-color : #BDBDBD;
@text-strong-color : #333;
@@ -81,7 +81,7 @@
/*-------------------------
Colors
--------------------------*/
@brand-primary: #2196F3;
@brand-primary: #2298f7;
@brand-success: #4CAF50;
@brand-info: #00BCD4;
@brand-warning: #FF9800;
@@ -91,4 +91,11 @@
/*-------------------------
Form
--------------------------*/
@input-border: #eee;
@input-border: #eee;
/*-------------------------
List
--------------------------*/
@list-row-selected-bg: #fbf2bf;
@list-row-even-bg: #fafafa;


@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -70,9 +70,9 @@ async.waterfall([
commitId = stdout.replace('\n', '')
if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
assetsFileName = 'ui-assets.go';
var cmd = 'go-bindata-assetfs -pkg miniobrowser -nocompress=true production/...'
var cmd = 'go-bindata-assetfs -pkg browser -nocompress=true production/...'
if (!isProduction) {
cmd = 'go-bindata-assetfs -pkg miniobrowser -nocompress=true dev/...'
cmd = 'go-bindata-assetfs -pkg browser -nocompress=true dev/...'
}
console.log('Running', cmd)
exec(cmd, cb)


@@ -1,5 +1,5 @@
{
"name": "minio-browser",
"name": "browser",
"version": "0.0.1",
"description": "Minio Browser",
"scripts": {
@@ -11,14 +11,14 @@
},
"repository": {
"type": "git",
"url": "https://github.com/minio/miniobrowser"
"url": "https://github.com/minio/minio"
},
"author": "Minio Inc",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/minio/miniobrowser/issues"
"url": "https://github.com/minio/minio/issues"
},
"homepage": "https://github.com/minio/miniobrowser",
"homepage": "https://github.com/minio/minio",
"devDependencies": {
"async": "^1.5.2",
"babel-cli": "^6.14.0",
@@ -26,6 +26,7 @@
"babel-loader": "^6.2.5",
"babel-plugin-syntax-object-rest-spread": "^6.13.0",
"babel-plugin-transform-object-rest-spread": "^6.8.0",
"babel-polyfill": "^6.23.0",
"babel-preset-es2015": "^6.14.0",
"babel-preset-react": "^6.11.1",
"babel-register": "^6.14.0",

File diff suppressed because one or more lines are too long


@@ -1,5 +1,5 @@
/*
* Minio Browser (C) 2016 Minio, Inc.
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@ var purify = require("purifycss-webpack-plugin")
var exports = {
context: __dirname,
entry: [
"babel-polyfill",
path.resolve(__dirname, 'app/index.js')
],
output: {
@@ -71,6 +72,10 @@ var exports = {
target: 'http://localhost:9000',
secure: false
},
'/minio/zip': {
target: 'http://localhost:9000',
secure: false
}
}
},
plugins: [
@@ -96,6 +101,7 @@ var exports = {
if (process.env.NODE_ENV === 'dev') {
exports.entry = [
"babel-polyfill",
'webpack/hot/dev-server',
'webpack-dev-server/client?http://localhost:8080',
path.resolve(__dirname, 'app/index.js')


@@ -53,7 +53,7 @@ go_build() {
release_bin_6="$release_str/$os-${arch}6vl/$(basename $package).$release_tag"
## Support building for ARM6vl
GOARM=6 GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin_6
GOARM=6 GOOS=$os GOARCH=$arch go build --ldflags "-s -w ${LDFLAGS}" -o $release_bin_6
## Copy
$CP -p $release_bin_6 $release_real_bin_6
@@ -70,7 +70,7 @@ go_build() {
release_bin_7="$release_str/$os-$arch/$(basename $package).$release_tag"
## Support building for ARM7vl
GOARM=7 GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin_7
GOARM=7 GOOS=$os GOARCH=$arch go build --ldflags "-s -w ${LDFLAGS}" -o $release_bin_7
## Copy
$CP -p $release_bin_7 $release_real_bin_7
@@ -82,7 +82,7 @@ go_build() {
shasum_str=$(${SHASUM} ${release_bin_7})
echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum_7
else
GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin
GOOS=$os GOARCH=$arch go build --ldflags "-s -w ${LDFLAGS}" -o $release_bin
# Create copy
if [ $os == "windows" ]; then


@@ -17,8 +17,10 @@
package cmd
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/url"
@@ -27,22 +29,26 @@ import (
)
const (
minioAdminOpHeader = "X-Minio-Operation"
minioAdminOpHeader = "X-Minio-Operation"
minioConfigTmpFormat = "config-%s.json"
)
// Type-safe query params.
type mgmtQueryKey string
// Only valid query params for list/clear locks management APIs.
// Only valid query params for mgmt admin APIs.
const (
mgmtBucket mgmtQueryKey = "bucket"
mgmtObject mgmtQueryKey = "object"
mgmtPrefix mgmtQueryKey = "prefix"
mgmtLockDuration mgmtQueryKey = "duration"
mgmtDelimiter mgmtQueryKey = "delimiter"
mgmtMarker mgmtQueryKey = "marker"
mgmtMaxKey mgmtQueryKey = "max-key"
mgmtDryRun mgmtQueryKey = "dry-run"
mgmtBucket mgmtQueryKey = "bucket"
mgmtObject mgmtQueryKey = "object"
mgmtPrefix mgmtQueryKey = "prefix"
mgmtLockDuration mgmtQueryKey = "duration"
mgmtDelimiter mgmtQueryKey = "delimiter"
mgmtMarker mgmtQueryKey = "marker"
mgmtKeyMarker mgmtQueryKey = "key-marker"
mgmtMaxKey mgmtQueryKey = "max-key"
mgmtDryRun mgmtQueryKey = "dry-run"
mgmtUploadIDMarker mgmtQueryKey = "upload-id-marker"
mgmtMaxUploads mgmtQueryKey = "max-uploads"
)
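
The `mgmtQueryKey` constants above implement the "type-safe query params" idea literally: call sites must convert through `string(...)`, so stray string literals stand out in review. A minimal, self-contained illustration of the pattern (the `main` wrapper is ours, not from the source):

```go
package main

import (
	"fmt"
	"net/url"
)

// Distinct string type for query keys, mirroring mgmtQueryKey above.
type mgmtQueryKey string

const mgmtBucket mgmtQueryKey = "bucket"

func main() {
	vars := url.Values{}
	vars.Set(string(mgmtBucket), "mybucket") // explicit conversion at the call site
	fmt.Println(vars.Get(string(mgmtBucket)))
}
```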
// ServerVersion - server version
@@ -159,18 +165,12 @@ func (adminAPI adminAPIHandlers) ServiceCredentialsHandler(w http.ResponseWriter
return
}
// Check passed credentials
err = validateAuthKeys(req.Username, req.Password)
creds, err := createCredential(req.Username, req.Password)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
creds := credential{
AccessKey: req.Username,
SecretKey: req.Password,
}
// Notify all other Minio peers to update credentials
updateErrs := updateCredsOnPeers(creds)
for peer, err := range updateErrs {
@@ -397,8 +397,57 @@ func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *htt
writeSuccessResponseJSON(w, jsonBytes)
}
// validateHealQueryParams - Validates query params for heal list management API.
func validateHealQueryParams(vars url.Values) (string, string, string, string, int, APIErrorCode) {
// ListUploadsHealHandler - similar to ListObjectsHealHandler
// GET
// /?heal&bucket=mybucket&prefix=myprefix&key-marker=mymarker&upload-id-marker=myuploadid&delimiter=mydelimiter&max-uploads=1000
// - bucket is mandatory query parameter
// - rest are optional query parameters. Lists up to max-uploads uploads
// that need healing in a given bucket matching the given prefix.
func (adminAPI adminAPIHandlers) ListUploadsHealHandler(w http.ResponseWriter, r *http.Request) {
// Get object layer instance.
objLayer := newObjectLayerFn()
if objLayer == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkRequestAuthType(r, "", "", "")
if adminAPIErr != ErrNone {
writeErrorResponse(w, adminAPIErr, r.URL)
return
}
// Validate query params.
vars := r.URL.Query()
bucket := vars.Get(string(mgmtBucket))
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, objLayer); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
if maxUploads <= 0 || maxUploads > maxUploadsList {
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
return
}
// Get the list objects to be healed.
listMultipartInfos, err := objLayer.ListUploadsHeal(bucket, prefix,
keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
listResponse := generateListMultipartUploadsResponse(bucket, listMultipartInfos)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(listResponse))
}
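
A sketch of how a client might invoke this handler, assuming a server at localhost:9000. The query keys and the operation header value are taken from this diff; the signature V4 signing step (required, as the tests below show) is elided:

```go
package main

import (
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("heal", "")
	q.Set("bucket", "mybucket")
	q.Set("prefix", "myprefix")
	q.Set("max-uploads", "1000")

	req, err := http.NewRequest("GET", "http://localhost:9000/?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Route matching in registerAdminRouter keys off this header.
	req.Header.Set("X-Minio-Operation", "list-uploads")
	// NOTE: the request must also be signed with AWS signature V4
	// (see signRequestV4 in the tests); unsigned requests are rejected.
	_, _ = http.DefaultClient.Do(req)
}
```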
// extractListObjectsHealQuery - Validates query params for heal objects list management API.
func extractListObjectsHealQuery(vars url.Values) (string, string, string, string, int, APIErrorCode) {
bucket := vars.Get(string(mgmtBucket))
prefix := vars.Get(string(mgmtPrefix))
marker := vars.Get(string(mgmtMarker))
@@ -415,10 +464,13 @@ func validateHealQueryParams(vars url.Values) (string, string, string, string, i
return "", "", "", "", 0, ErrInvalidObjectName
}
// check if maxKey is a valid integer.
maxKey, err := strconv.Atoi(maxKeyStr)
if err != nil {
return "", "", "", "", 0, ErrInvalidMaxKeys
// check if maxKey is a valid integer, if present.
var maxKey int
var err error
if maxKeyStr != "" {
if maxKey, err = strconv.Atoi(maxKeyStr); err != nil {
return "", "", "", "", 0, ErrInvalidMaxKeys
}
}
// Validate prefix, marker, delimiter and maxKey.
@@ -451,7 +503,7 @@ func (adminAPI adminAPIHandlers) ListObjectsHealHandler(w http.ResponseWriter, r
// Validate query params.
vars := r.URL.Query()
bucket, prefix, marker, delimiter, maxKey, adminAPIErr := validateHealQueryParams(vars)
bucket, prefix, marker, delimiter, maxKey, adminAPIErr := extractListObjectsHealQuery(vars)
if adminAPIErr != ErrNone {
writeErrorResponse(w, adminAPIErr, r.URL)
return
@@ -672,3 +724,152 @@ func (adminAPI adminAPIHandlers) HealFormatHandler(w http.ResponseWriter, r *htt
// Return 200 on success.
writeSuccessResponseHeadersOnly(w)
}
// GetConfigHandler - GET /?config
// - x-minio-operation = get
// Get config.json of this minio setup.
func (adminAPI adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
// Validate request signature.
adminAPIErr := checkRequestAuthType(r, "", "", "")
if adminAPIErr != ErrNone {
writeErrorResponse(w, adminAPIErr, r.URL)
return
}
// check if objectLayer is initialized, if not return.
if newObjectLayerFn() == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// Get config.json from all nodes. In a single node setup, it
// returns local config.json.
configBytes, err := getPeerConfig(globalAdminPeers)
if err != nil {
errorIf(err, "Failed to get config from peers")
writeErrorResponse(w, toAdminAPIErrCode(err), r.URL)
return
}
writeSuccessResponseJSON(w, configBytes)
}
// toAdminAPIErrCode - converts errXLWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(err error) APIErrorCode {
switch err {
case errXLWriteQuorum:
return ErrAdminConfigNoQuorum
}
return toAPIErrorCode(err)
}
// SetConfigResult - represents detailed results of a set-config
// operation.
type nodeSummary struct {
Name string `json:"name"`
ErrSet bool `json:"errSet"`
ErrMsg string `json:"errMsg"`
}
type setConfigResult struct {
NodeResults []nodeSummary `json:"nodeResults"`
Status bool `json:"status"`
}
// writeSetConfigResponse - writes setConfigResult value as json depending on the status.
func writeSetConfigResponse(w http.ResponseWriter, peers adminPeers, errs []error, status bool, reqURL *url.URL) {
var nodeResults []nodeSummary
// Build nodeResults based on error values received during
// set-config operation.
for i := range errs {
nodeResults = append(nodeResults, nodeSummary{
Name: peers[i].addr,
ErrSet: errs[i] != nil,
ErrMsg: fmt.Sprintf("%v", errs[i]),
})
}
result := setConfigResult{
Status: status,
NodeResults: nodeResults,
}
// The following elaborate json encoding is to avoid escaping
// '<', '>' in <nil>. Note: json.Encoder.Encode() adds a
// gratuitous "\n".
var resultBuf bytes.Buffer
enc := json.NewEncoder(&resultBuf)
enc.SetEscapeHTML(false)
jsonErr := enc.Encode(result)
if jsonErr != nil {
writeErrorResponse(w, toAPIErrorCode(jsonErr), reqURL)
return
}
writeSuccessResponseJSON(w, resultBuf.Bytes())
return
}
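
The comment above is easy to verify in isolation. A small, runnable demonstration of both behaviors it mentions, the HTML escaping of '<' and '>' and the trailing newline appended by Encode:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	msg := map[string]string{"errMsg": "<nil>"}

	var a bytes.Buffer
	json.NewEncoder(&a).Encode(msg) // default encoder escapes HTML
	fmt.Printf("%q\n", a.String())  // "{\"errMsg\":\"\\u003cnil\\u003e\"}\n"

	var b bytes.Buffer
	enc := json.NewEncoder(&b)
	enc.SetEscapeHTML(false)
	enc.Encode(msg)
	fmt.Printf("%q\n", b.String()) // "{\"errMsg\":\"<nil>\"}\n" (note the "\n")
}
```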
// SetConfigHandler - PUT /?config
// - x-minio-operation = set
func (adminAPI adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkRequestAuthType(r, "", "", "")
if adminAPIErr != ErrNone {
writeErrorResponse(w, adminAPIErr, r.URL)
return
}
// Read configuration bytes from request body.
configBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
errorIf(err, "Failed to read config from request body.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Write config received from request onto a temporary file on
// all nodes.
tmpFileName := fmt.Sprintf(minioConfigTmpFormat, mustGetUUID())
errs := writeTmpConfigPeers(globalAdminPeers, tmpFileName, configBytes)
// Check if the operation succeeded in quorum or more nodes.
rErr := reduceWriteQuorumErrs(errs, nil, len(globalAdminPeers)/2+1)
if rErr != nil {
writeSetConfigResponse(w, globalAdminPeers, errs, false, r.URL)
return
}
// Take a lock on minio/config.json. NB minio is a reserved
// bucket name and wouldn't conflict with normal object
// operations.
configLock := globalNSMutex.NewNSLock(minioReservedBucket, minioConfigFile)
configLock.Lock()
defer configLock.Unlock()
// Rename the temporary config file to config.json
errs = commitConfigPeers(globalAdminPeers, tmpFileName)
rErr = reduceWriteQuorumErrs(errs, nil, len(globalAdminPeers)/2+1)
if rErr != nil {
writeSetConfigResponse(w, globalAdminPeers, errs, false, r.URL)
return
}
// serverMux (cmd/server-mux.go) implements graceful shutdown,
// where all listeners are closed and process restart/shutdown
// happens after 5s or completion of all ongoing http
// requests, whichever is earlier.
writeSetConfigResponse(w, globalAdminPeers, errs, true, r.URL)
// Restart all node for the modified config to take effect.
sendServiceCmd(globalAdminPeers, serviceRestart)
}
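
Both phases of SetConfigHandler (temporary write, then commit) gate on a majority quorum of `len(globalAdminPeers)/2 + 1`. A worked sketch of that arithmetic, with a hypothetical `metQuorum` helper standing in for reduceWriteQuorumErrs, whose implementation is not part of this diff:

```go
package main

import (
	"errors"
	"fmt"
)

// metQuorum reports whether at least `quorum` operations succeeded.
// Illustrative stand-in only, not the reduceWriteQuorumErrs from the source.
func metQuorum(errs []error, quorum int) bool {
	ok := 0
	for _, err := range errs {
		if err == nil {
			ok++
		}
	}
	return ok >= quorum
}

func main() {
	errs := []error{nil, nil, errors.New("disk not found"), nil}
	quorum := len(errs)/2 + 1                    // 4 peers -> quorum of 3
	fmt.Println(quorum, metQuorum(errs, quorum)) // 3 true
}
```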


@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -30,6 +31,102 @@ import (
router "github.com/gorilla/mux"
)
var configJSON = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-west-1",
"logger": {
"console": {
"enable": true,
"level": "fatal"
},
"file": {
"enable": false,
"fileName": "",
"level": ""
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}`)
// adminXLTestBed - encapsulates subsystems that need to be setup for
// admin-handler unit tests.
type adminXLTestBed struct {
@@ -643,7 +740,7 @@ func TestValidateHealQueryParams(t *testing.T) {
}
for i, test := range testCases {
vars := mkListObjectsQueryVal(test.bucket, test.prefix, test.marker, test.delimiter, test.maxKeys)
_, _, _, _, _, actualErr := validateHealQueryParams(vars)
_, _, _, _, _, actualErr := extractListObjectsHealQuery(vars)
if actualErr != test.apiErr {
t.Errorf("Test %d - Expected %v but received %v",
i+1, getAPIError(test.apiErr), getAPIError(actualErr))
@@ -759,9 +856,6 @@ func TestListObjectsHealHandler(t *testing.T) {
}
for i, test := range testCases {
if i != 0 {
continue
}
queryVal := mkListObjectsQueryVal(test.bucket, test.prefix, test.marker, test.delimiter, test.maxKeys)
req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil)
if err != nil {
@@ -988,3 +1082,342 @@ func TestHealFormatHandler(t *testing.T) {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
}
// TestGetConfigHandler - test for GetConfigHandler.
func TestGetConfigHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
// Prepare query params for get-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("config", "")
req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil)
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
// Set x-minio-operation header to get.
req.Header.Set(minioAdminOpHeader, "get")
// Sign the request using signature v4.
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign heal object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
}
// TestSetConfigHandler - test for SetConfigHandler.
func TestSetConfigHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
if err != nil {
t.Fatalf("Failed to parse storage end point - %v", err)
}
// Set globalMinioAddr to be able to distinguish local endpoints from remote.
globalMinioAddr = eps[0].Host
initGlobalAdminPeers(eps)
// SetConfigHandler restarts minio setup - need to start a
// signal receiver to receive on globalServiceSignalCh.
go testServiceSignalReceiver(restartCmd, t)
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("config", "")
req, err := newTestRequest("PUT", "/?"+queryVal.Encode(), int64(len(configJSON)), bytes.NewReader(configJSON))
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
// Set x-minio-operation header to set.
req.Header.Set(minioAdminOpHeader, "set")
// Sign the request using signature v4.
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign heal object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
result := setConfigResult{}
err = json.NewDecoder(rec.Body).Decode(&result)
if err != nil {
t.Fatalf("Failed to decode set config result json %v", err)
}
if !result.Status {
t.Error("Expected set-config to succeed, but failed")
}
}
// TestToAdminAPIErr - test for toAdminAPIErr helper function.
func TestToAdminAPIErr(t *testing.T) {
testCases := []struct {
err error
expectedAPIErr APIErrorCode
}{
// 1. Server not in quorum.
{
err: errXLWriteQuorum,
expectedAPIErr: ErrAdminConfigNoQuorum,
},
// 2. No error.
{
err: nil,
expectedAPIErr: ErrNone,
},
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(errDiskNotFound),
},
}
for i, test := range testCases {
actualErr := toAdminAPIErrCode(test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
}
}
}
func TestWriteSetConfigResponse(t *testing.T) {
testCases := []struct {
status bool
errs []error
}{
// 1. all nodes returned success.
{
status: true,
errs: []error{nil, nil, nil, nil},
},
// 2. some nodes returned errors.
{
status: false,
errs: []error{errDiskNotFound, nil, errDiskAccessDenied, errFaultyDisk},
},
}
testPeers := []adminPeer{
{
addr: "localhost:9001",
},
{
addr: "localhost:9002",
},
{
addr: "localhost:9003",
},
{
addr: "localhost:9004",
},
}
testURL, err := url.Parse("dummy.com")
if err != nil {
t.Fatalf("Failed to parse a placeholder url")
}
var actualResult setConfigResult
for i, test := range testCases {
rec := httptest.NewRecorder()
writeSetConfigResponse(rec, testPeers, test.errs, test.status, testURL)
resp := rec.Result()
jsonBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("Test %d: Failed to read response %v", i+1, err)
}
err = json.Unmarshal(jsonBytes, &actualResult)
if err != nil {
t.Fatalf("Test %d: Failed to unmarshal json %v", i+1, err)
}
if actualResult.Status != test.status {
t.Errorf("Test %d: Expected status %v but received %v", i+1, test.status, actualResult.Status)
}
for p, res := range actualResult.NodeResults {
if res.Name != testPeers[p].addr {
t.Errorf("Test %d: Expected node name %s but received %s", i+1, testPeers[p].addr, res.Name)
}
expectedErrMsg := fmt.Sprintf("%v", test.errs[p])
if res.ErrMsg != expectedErrMsg {
t.Errorf("Test %d: Expected error %s but received %s", i+1, expectedErrMsg, res.ErrMsg)
}
expectedErrSet := test.errs[p] != nil
if res.ErrSet != expectedErrSet {
t.Errorf("Test %d: Expected ErrSet %v but received %v", i+1, expectedErrSet, res.ErrSet)
}
}
}
}
// mkUploadsHealQuery - helper function to construct query values for
// listUploadsHeal.
func mkUploadsHealQuery(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploadsStr string) url.Values {
queryVal := make(url.Values)
queryVal.Set("heal", "")
queryVal.Set(string(mgmtBucket), bucket)
queryVal.Set(string(mgmtPrefix), prefix)
queryVal.Set(string(mgmtKeyMarker), keyMarker)
queryVal.Set(string(mgmtUploadIDMarker), uploadIDMarker)
queryVal.Set(string(mgmtDelimiter), delimiter)
queryVal.Set(string(mgmtMaxUploads), maxUploadsStr)
return queryVal
}
func TestListHealUploadsHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
err = adminTestBed.objLayer.MakeBucket("mybucket")
if err != nil {
t.Fatalf("Failed to make bucket - %v", err)
}
// Delete bucket after running all test cases.
defer adminTestBed.objLayer.DeleteBucket("mybucket")
testCases := []struct {
bucket string
prefix string
keyMarker string
delimiter string
maxKeys string
statusCode int
}{
// 1. Valid params.
{
bucket: "mybucket",
prefix: "prefix",
keyMarker: "prefix11",
delimiter: "/",
maxKeys: "10",
statusCode: http.StatusOK,
},
// 2. Valid params with empty prefix.
{
bucket: "mybucket",
prefix: "",
keyMarker: "",
delimiter: "/",
maxKeys: "10",
statusCode: http.StatusOK,
},
// 3. Invalid params with invalid bucket.
{
bucket: `invalid\\Bucket`,
prefix: "prefix",
keyMarker: "prefix11",
delimiter: "/",
maxKeys: "10",
statusCode: getAPIError(ErrInvalidBucketName).HTTPStatusCode,
},
// 4. Invalid params with invalid prefix.
{
bucket: "mybucket",
prefix: `invalid\\Prefix`,
keyMarker: "prefix11",
delimiter: "/",
maxKeys: "10",
statusCode: getAPIError(ErrInvalidObjectName).HTTPStatusCode,
},
// 5. Invalid params with invalid maxKeys.
{
bucket: "mybucket",
prefix: "prefix",
keyMarker: "prefix11",
delimiter: "/",
maxKeys: "-1",
statusCode: getAPIError(ErrInvalidMaxUploads).HTTPStatusCode,
},
// 6. Invalid params with unsupported prefix marker combination.
{
bucket: "mybucket",
prefix: "prefix",
keyMarker: "notmatchingmarker",
delimiter: "/",
maxKeys: "10",
statusCode: getAPIError(ErrNotImplemented).HTTPStatusCode,
},
// 7. Invalid params with unsupported delimiter.
{
bucket: "mybucket",
prefix: "prefix",
keyMarker: "notmatchingmarker",
delimiter: "unsupported",
maxKeys: "10",
statusCode: getAPIError(ErrNotImplemented).HTTPStatusCode,
},
// 8. Invalid params with invalid max Keys
{
bucket: "mybucket",
prefix: "prefix",
keyMarker: "prefix11",
delimiter: "/",
maxKeys: "999999999999999999999999999",
statusCode: getAPIError(ErrInvalidMaxUploads).HTTPStatusCode,
},
}
for i, test := range testCases {
queryVal := mkUploadsHealQuery(test.bucket, test.prefix, test.keyMarker, "", test.delimiter, test.maxKeys)
req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil)
if err != nil {
t.Fatalf("Test %d - Failed to construct list uploads needing heal request - %v", i+1, err)
}
req.Header.Set(minioAdminOpHeader, "list-uploads")
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Test %d - Failed to sign list uploads needing heal request - %v", i+1, err)
}
rec := httptest.NewRecorder()
adminTestBed.mux.ServeHTTP(rec, req)
if test.statusCode != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code)
}
}
}


@@ -53,6 +53,8 @@ func registerAdminRouter(mux *router.Router) {
// List Objects needing heal.
adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list-objects").HandlerFunc(adminAPI.ListObjectsHealHandler)
// List Uploads needing heal.
adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list-uploads").HandlerFunc(adminAPI.ListUploadsHealHandler)
// List Buckets needing heal.
adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list-buckets").HandlerFunc(adminAPI.ListBucketsHealHandler)
@@ -62,4 +64,11 @@ func registerAdminRouter(mux *router.Router) {
adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "object").HandlerFunc(adminAPI.HealObjectHandler)
// Heal Format.
adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "format").HandlerFunc(adminAPI.HealFormatHandler)
/// Config operations
// Get config
adminRouter.Methods("GET").Queries("config", "").Headers(minioAdminOpHeader, "get").HandlerFunc(adminAPI.GetConfigHandler)
// Set Config
adminRouter.Methods("PUT").Queries("config", "").Headers(minioAdminOpHeader, "set").HandlerFunc(adminAPI.SetConfigHandler)
}
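
Both config routes match on a query key plus the X-Minio-Operation header. A sketch of the corresponding client calls, assuming a server at localhost:9000 and omitting the mandatory signature V4 signing shown in the tests above:

```go
package main

import (
	"bytes"
	"net/http"
)

func main() {
	// GET /?config with x-minio-operation: get fetches config.json.
	get, _ := http.NewRequest("GET", "http://localhost:9000/?config", nil)
	get.Header.Set("X-Minio-Operation", "get")

	// PUT /?config with x-minio-operation: set replaces it cluster-wide.
	body := bytes.NewReader([]byte(`{"version": "13"}`)) // truncated config, for illustration
	set, _ := http.NewRequest("PUT", "http://localhost:9000/?config", body)
	set.Header.Set("X-Minio-Operation", "set")

	// Both requests must be signed with AWS signature V4 before sending.
	_ = get
	_ = set
}
```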


@@ -17,13 +17,30 @@
package cmd
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"sort"
"sync"
"time"
)
const (
// Admin service names
serviceRestartRPC = "Admin.Restart"
listLocksRPC = "Admin.ListLocks"
reInitDisksRPC = "Admin.ReInitDisks"
uptimeRPC = "Admin.Uptime"
getConfigRPC = "Admin.GetConfig"
writeTmpConfigRPC = "Admin.WriteTmpConfig"
commitConfigRPC = "Admin.CommitConfig"
)
// localAdminClient - represents admin operation to be executed locally.
type localAdminClient struct {
}
@@ -41,6 +58,9 @@ type adminCmdRunner interface {
ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
ReInitDisks() error
Uptime() (time.Duration, error)
GetConfig() ([]byte, error)
WriteTmpConfig(tmpFileName string, configBytes []byte) error
CommitConfig(tmpFileName string) error
}
// Restart - Sends a message over channel to the go-routine
@@ -59,7 +79,7 @@ func (lc localAdminClient) ListLocks(bucket, prefix string, duration time.Durati
func (rc remoteAdminClient) Restart() error {
args := AuthRPCArgs{}
reply := AuthRPCReply{}
return rc.Call("Admin.Restart", &args, &reply)
return rc.Call(serviceRestartRPC, &args, &reply)
}
// ListLocks - Sends list locks command to remote server via RPC.
@@ -70,7 +90,7 @@ func (rc remoteAdminClient) ListLocks(bucket, prefix string, duration time.Durat
duration: duration,
}
var reply ListLocksReply
if err := rc.Call("Admin.ListLocks", &listArgs, &reply); err != nil {
if err := rc.Call(listLocksRPC, &listArgs, &reply); err != nil {
return nil, err
}
return reply.volLocks, nil
@@ -87,7 +107,7 @@ func (lc localAdminClient) ReInitDisks() error {
func (rc remoteAdminClient) ReInitDisks() error {
args := AuthRPCArgs{}
reply := AuthRPCReply{}
return rc.Call("Admin.ReInitDisks", &args, &reply)
return rc.Call(reInitDisksRPC, &args, &reply)
}
// Uptime - Returns the uptime of this server. Timestamp is taken
@@ -104,7 +124,7 @@ func (lc localAdminClient) Uptime() (time.Duration, error) {
func (rc remoteAdminClient) Uptime() (time.Duration, error) {
args := AuthRPCArgs{}
reply := UptimeReply{}
err := rc.Call("Admin.Uptime", &args, &reply)
err := rc.Call(uptimeRPC, &args, &reply)
if err != nil {
return time.Duration(0), err
}
@@ -112,6 +132,75 @@ func (rc remoteAdminClient) Uptime() (time.Duration, error) {
return reply.Uptime, nil
}
// GetConfig - returns config.json of the local server.
func (lc localAdminClient) GetConfig() ([]byte, error) {
if serverConfig == nil {
return nil, errors.New("config not present")
}
return json.Marshal(serverConfig)
}
// GetConfig - returns config.json of the remote server.
func (rc remoteAdminClient) GetConfig() ([]byte, error) {
args := AuthRPCArgs{}
reply := ConfigReply{}
if err := rc.Call(getConfigRPC, &args, &reply); err != nil {
return nil, err
}
return reply.Config, nil
}
// WriteTmpConfig - writes config file content to a temporary file on
// the local server.
func (lc localAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
return writeTmpConfigCommon(tmpFileName, configBytes)
}
// WriteTmpConfig - writes config file content to a temporary file on
// a remote node.
func (rc remoteAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
wArgs := WriteConfigArgs{
TmpFileName: tmpFileName,
Buf: configBytes,
}
err := rc.Call(writeTmpConfigRPC, &wArgs, &WriteConfigReply{})
if err != nil {
errorIf(err, "Failed to write temporary config file.")
return err
}
return nil
}
// CommitConfig - Move the new config in tmpFileName onto config.json
// on a local node.
func (lc localAdminClient) CommitConfig(tmpFileName string) error {
configFile := getConfigFile()
tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)
err := os.Rename(tmpConfigFile, configFile)
errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile))
return err
}
// CommitConfig - Move the new config in tmpFileName onto config.json
// on a remote node.
func (rc remoteAdminClient) CommitConfig(tmpFileName string) error {
cArgs := CommitConfigArgs{
FileName: tmpFileName,
}
cReply := CommitConfigReply{}
err := rc.Call(commitConfigRPC, &cArgs, &cReply)
if err != nil {
errorIf(err, "Failed to rename config file.")
return err
}
return nil
}
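
The commit step relies on os.Rename being atomic when source and destination live on the same filesystem, so readers of config.json never observe a half-written file. A minimal sketch of the write-temporary-then-rename pattern, with hypothetical file names:

```go
package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

func commitConfig(configDir string, data []byte) error {
	tmp := filepath.Join(configDir, "config-tmp.json") // hypothetical temp name
	dst := filepath.Join(configDir, "config.json")
	// Write the full contents to a temporary file first ...
	if err := ioutil.WriteFile(tmp, data, 0666); err != nil {
		return err
	}
	// ... then atomically swap it into place (same filesystem assumed).
	return os.Rename(tmp, dst)
}

func main() {
	_ = commitConfig(os.TempDir(), []byte(`{"version":"13"}`))
}
```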
// adminPeer - represents an entity that implements Restart methods.
type adminPeer struct {
addr string
@@ -150,7 +239,7 @@ func makeAdminPeers(eps []*url.URL) adminPeers {
secretKey: serverCred.SecretKey,
serverAddr: ep.Host,
secureConn: globalIsSSL,
serviceEndpoint: path.Join(reservedBucket, adminPath),
serviceEndpoint: path.Join(minioReservedBucketPath, adminPath),
serviceName: "Admin",
}
@@ -336,3 +425,185 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
return latestUptime, nil
}
// getPeerConfig - Fetches config.json from all nodes in the setup and
// returns the one that occurs in a majority of them.
func getPeerConfig(peers adminPeers) ([]byte, error) {
if !globalIsDistXL {
return peers[0].cmdRunner.GetConfig()
}
errs := make([]error, len(peers))
configs := make([][]byte, len(peers))
// Get config from all servers.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
configs[idx], errs[idx] = peer.cmdRunner.GetConfig()
}(i, peer)
}
wg.Wait()
// Find the maximally occurring config among peers in a
// distributed setup.
serverConfigs := make([]serverConfigV13, len(peers))
for i, configBytes := range configs {
if errs[i] != nil {
continue
}
// Unmarshal the received config files.
err := json.Unmarshal(configBytes, &serverConfigs[i])
if err != nil {
errorIf(err, "Failed to unmarshal serverConfig from ", peers[i].addr)
return nil, err
}
}
configJSON, err := getValidServerConfig(serverConfigs, errs)
if err != nil {
errorIf(err, "Unable to find a valid server config")
return nil, traceError(err)
}
// Return the config.json that was present on quorum or
// more of the nodes.
return json.Marshal(configJSON)
}
// getValidServerConfig - finds the server config that is present in
// quorum or more number of servers.
func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (serverConfigV13, error) {
// majority-based quorum
quorum := len(serverConfigs)/2 + 1
// Count the number of disks a config.json was found in.
configCounter := make([]int, len(serverConfigs))
// We group equal serverConfigs by the lowest index of the
// same value; e.g, let us take the following serverConfigs
// in a 4-node setup,
// serverConfigs == [c1, c2, c1, c1]
// configCounter == [3, 1, 0, 0]
// c1, c2 are the only distinct values that appear. c1 is
// identified by 0, the lowest index it appears in and c2 is
// identified by 1. So, we need to find the number of times
// each of these distinct values occur.
// Invariants:
// 1. At the beginning of the i-th iteration, the number of
// unique configurations seen so far is equal to the number of
// non-zero counter values in configCounter[:i].
// 2. At the beginning of the i-th iteration, the sum of
// elements of configCounter[:i] is equal to the number of
// non-error configurations seen so far.
// For each of the serverConfig ...
for i := range serverConfigs {
// Skip nodes where getConfig failed.
if errs[i] != nil {
continue
}
// Check if it is equal to any of the configurations
// seen so far. If j == i is reached then we have an
// unseen configuration.
for j := 0; j <= i; j++ {
if j < i && configCounter[j] == 0 {
// serverConfigs[j] is known to be
// equal to a value that was already
// seen. See example above for
// clarity.
continue
} else if j < i && reflect.DeepEqual(serverConfigs[i], serverConfigs[j]) {
// serverConfigs[i] is equal to
// serverConfigs[j], update
// serverConfigs[j]'s counter since it
// is the lower index.
configCounter[j]++
break
} else if j == i {
// serverConfigs[i] is equal to no
// other value seen before. It is
// unique so far.
configCounter[i] = 1
break
} // else invariants specified above are violated.
}
}
// We find the maximally occurring server config and check if
// there is quorum.
var configJSON serverConfigV13
maxOccurrence := 0
for i, count := range configCounter {
if maxOccurrence < count {
maxOccurrence = count
configJSON = serverConfigs[i]
}
}
// If quorum nodes don't agree.
if maxOccurrence < quorum {
return serverConfigV13{}, errXLWriteQuorum
}
return configJSON, nil
}
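
The counting scheme in getValidServerConfig is easiest to see on concrete values. A runnable sketch that reproduces the `[c1, c2, c1, c1] -> [3, 1, 0, 0]` example from the comments, using strings in place of serverConfigV13 and omitting the error-skipping for brevity:

```go
package main

import "fmt"

func main() {
	configs := []string{"c1", "c2", "c1", "c1"}
	counter := make([]int, len(configs))
	for i := range configs {
		for j := 0; j <= i; j++ {
			if j < i && counter[j] == 0 {
				continue // configs[j] was already counted under a lower index
			} else if j < i && configs[i] == configs[j] {
				counter[j]++ // same value as an earlier config
				break
			} else if j == i {
				counter[i] = 1 // first occurrence of this value
				break
			}
		}
	}
	fmt.Println(counter) // [3 1 0 0]; quorum = 4/2+1 = 3, so "c1" wins
}
```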
// Write config contents into a temporary file on all nodes.
func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error {
// For a single-node minio server setup.
if !globalIsDistXL {
err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
return []error{err}
}
errs := make([]error, len(peers))
// Write config into temporary file on all nodes.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
}(i, peer)
}
wg.Wait()
// Return errors (if any) received while writing the
// temporary config file.
return errs
}
// Move config contents from the given temporary file onto config.json
// on all nodes.
func commitConfigPeers(peers adminPeers, tmpFileName string) []error {
// For a single-node minio server setup.
if !globalIsDistXL {
return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)}
}
errs := make([]error, len(peers))
// Rename temporary config file into configDir/config.json on
// all nodes.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName)
}(i, peer)
}
wg.Wait()
// Return errors (if any) received during rename.
return errs
}
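
writeTmpConfigPeers and commitConfigPeers share the same fan-out shape: one goroutine per peer, with results collected into a pre-sized error slice indexed by peer. A generic sketch of that pattern in isolation (ours, not code from the source):

```go
package main

import (
	"fmt"
	"sync"
)

// forEachPeer runs op concurrently for every peer and returns the
// per-peer errors in input order, mirroring the functions above.
func forEachPeer(peers []string, op func(peer string) error) []error {
	errs := make([]error, len(peers))
	var wg sync.WaitGroup
	for i, peer := range peers {
		wg.Add(1)
		go func(idx int, p string) {
			defer wg.Done()
			errs[idx] = op(p)
		}(i, peer)
	}
	wg.Wait()
	return errs
}

func main() {
	errs := forEachPeer([]string{"localhost:9001", "localhost:9002"}, func(p string) error {
		fmt.Println("writing config to", p)
		return nil
	})
	fmt.Println(errs)
}
```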


@@ -0,0 +1,260 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"reflect"
"testing"
)
var (
config1 = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"logger": {
"console": {
"enable": true,
"level": "debug"
},
"file": {
"enable": false,
"fileName": "",
"level": ""
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}
`)
// diff from config1 - amqp.Enable is True
config2 = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"logger": {
"console": {
"enable": true,
"level": "debug"
},
"file": {
"enable": false,
"fileName": "",
"level": ""
}
},
"notify": {
"amqp": {
"1": {
"enable": true,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}
`)
)
// TestGetValidServerConfig - test for getValidServerConfig.
func TestGetValidServerConfig(t *testing.T) {
var c1, c2 serverConfigV13
err := json.Unmarshal(config1, &c1)
if err != nil {
t.Fatalf("json unmarshal of %s failed: %v", string(config1), err)
}
err = json.Unmarshal(config2, &c2)
if err != nil {
t.Fatalf("json unmarshal of %s failed: %v", string(config2), err)
}
// Valid config.
noErrs := []error{nil, nil, nil, nil}
serverConfigs := []serverConfigV13{c1, c2, c1, c1}
validConfig, err := getValidServerConfig(serverConfigs, noErrs)
if err != nil {
t.Errorf("Expected a valid config but received %v instead", err)
}
if !reflect.DeepEqual(validConfig, c1) {
t.Errorf("Expected valid config to be %v but received %v", config1, validConfig)
}
// Invalid config - no quorum.
serverConfigs = []serverConfigV13{c1, c2, c2, c1}
validConfig, err = getValidServerConfig(serverConfigs, noErrs)
if err != errXLWriteQuorum {
t.Errorf("Expected to fail due to lack of quorum but received %v", err)
}
// All errors
allErrs := []error{errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound}
serverConfigs = []serverConfigV13{{}, {}, {}, {}}
validConfig, err = getValidServerConfig(serverConfigs, allErrs)
if err != errXLWriteQuorum {
t.Errorf("Expected to fail due to lack of quorum but received %v", err)
}
}


@@ -17,8 +17,13 @@
package cmd
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/rpc"
"os"
"path/filepath"
"time"
router "github.com/gorilla/mux"
@@ -54,6 +59,12 @@ type UptimeReply struct {
Uptime time.Duration
}
// ConfigReply - wraps the server config response over RPC.
type ConfigReply struct {
AuthRPCReply
Config []byte // json-marshalled bytes of serverConfigV13
}
// Restart - Restart this instance of minio server.
func (s *adminCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error {
if err := args.IsAuthenticated(); err != nil {
@@ -132,6 +143,78 @@ func (s *adminCmd) Uptime(args *AuthRPCArgs, reply *UptimeReply) error {
return nil
}
// GetConfig - returns the config.json of this server.
func (s *adminCmd) GetConfig(args *AuthRPCArgs, reply *ConfigReply) error {
if err := args.IsAuthenticated(); err != nil {
return err
}
if serverConfig == nil {
return errors.New("config not present")
}
jsonBytes, err := json.Marshal(serverConfig)
if err != nil {
return err
}
reply.Config = jsonBytes
return nil
}
// WriteConfigArgs - wraps the bytes to be written and temporary file name.
type WriteConfigArgs struct {
AuthRPCArgs
TmpFileName string
Buf []byte
}
// WriteConfigReply - wraps the result of writing config into a temporary file on
// the remote node.
type WriteConfigReply struct {
AuthRPCReply
}
func writeTmpConfigCommon(tmpFileName string, configBytes []byte) error {
tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)
err := ioutil.WriteFile(tmpConfigFile, configBytes, 0666)
errorIf(err, fmt.Sprintf("Failed to write to temporary config file %s", tmpConfigFile))
return err
}
// WriteTmpConfig - writes the supplied config contents onto the
// supplied temporary file.
func (s *adminCmd) WriteTmpConfig(wArgs *WriteConfigArgs, wReply *WriteConfigReply) error {
if err := wArgs.IsAuthenticated(); err != nil {
return err
}
return writeTmpConfigCommon(wArgs.TmpFileName, wArgs.Buf)
}
// CommitConfigArgs - wraps the config file name that needs to be
// committed into config.json on this node.
type CommitConfigArgs struct {
AuthRPCArgs
FileName string
}
// CommitConfigReply - represents response to commit of config file on
// this node.
type CommitConfigReply struct {
AuthRPCReply
}
// CommitConfig - Renames the temporary file into config.json on this node.
func (s *adminCmd) CommitConfig(cArgs *CommitConfigArgs, cReply *CommitConfigReply) error {
configFile := getConfigFile()
tmpConfigFile := filepath.Join(getConfigDir(), cArgs.FileName)
err := os.Rename(tmpConfigFile, configFile)
errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile))
return err
}
// registerAdminRPCRouter - registers RPC methods for service status,
// stop and restart commands.
func registerAdminRPCRouter(mux *router.Router) error {
@@ -141,7 +224,7 @@ func registerAdminRPCRouter(mux *router.Router) error {
if err != nil {
return traceError(err)
}
adminRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()
adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
adminRouter.Path(adminPath).Handler(adminRPCServer)
return nil
}


@@ -17,6 +17,7 @@
package cmd
import (
"encoding/json"
"net/url"
"testing"
"time"
@@ -52,7 +53,7 @@ func testAdminCmd(cmd cmdType, t *testing.T) {
<-globalServiceSignalCh
}()
ga := AuthRPCArgs{AuthToken: reply.AuthToken, RequestTime: time.Now().UTC()}
ga := AuthRPCArgs{AuthToken: reply.AuthToken}
genReply := AuthRPCReply{}
switch cmd {
case restartCmd:
@@ -107,8 +108,7 @@ func TestReInitDisks(t *testing.T) {
}
authArgs := AuthRPCArgs{
AuthToken: reply.AuthToken,
RequestTime: time.Now().UTC(),
AuthToken: reply.AuthToken,
}
authReply := AuthRPCReply{}
@@ -133,8 +133,7 @@ func TestReInitDisks(t *testing.T) {
}
authArgs = AuthRPCArgs{
AuthToken: fsReply.AuthToken,
RequestTime: time.Now().UTC(),
AuthToken: fsReply.AuthToken,
}
authReply = AuthRPCReply{}
// Attempt ReInitDisks service on a FS backend.
@@ -144,3 +143,106 @@ func TestReInitDisks(t *testing.T) {
errUnsupportedBackend, err)
}
}
// TestGetConfig - Test for GetConfig admin RPC.
func TestGetConfig(t *testing.T) {
// Reset global variables to start afresh.
resetTestGlobals()
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Unable to initialize server config. %s", err)
}
defer removeAll(rootPath)
adminServer := adminCmd{}
creds := serverConfig.GetCredential()
args := LoginRPCArgs{
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
}
reply := LoginRPCReply{}
err = adminServer.Login(&args, &reply)
if err != nil {
t.Fatalf("Failed to login to admin server - %v", err)
}
authArgs := AuthRPCArgs{
AuthToken: reply.AuthToken,
}
configReply := ConfigReply{}
err = adminServer.GetConfig(&authArgs, &configReply)
if err != nil {
t.Errorf("Expected GetConfig to pass but failed with %v", err)
}
var config serverConfigV13
err = json.Unmarshal(configReply.Config, &config)
if err != nil {
t.Errorf("Expected json unmarshal to pass but failed with %v", err)
}
}
// TestWriteAndCommitConfig - test for WriteTmpConfig and CommitConfig
// RPC handler.
func TestWriteAndCommitConfig(t *testing.T) {
// Reset global variables to start afresh.
resetTestGlobals()
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Unable to initialize server config. %s", err)
}
defer removeAll(rootPath)
adminServer := adminCmd{}
creds := serverConfig.GetCredential()
args := LoginRPCArgs{
Username: creds.AccessKey,
Password: creds.SecretKey,
Version: Version,
RequestTime: time.Now().UTC(),
}
reply := LoginRPCReply{}
err = adminServer.Login(&args, &reply)
if err != nil {
t.Fatalf("Failed to login to admin server - %v", err)
}
// Write temporary config.
buf := []byte("hello")
tmpFileName := mustGetUUID()
wArgs := WriteConfigArgs{
AuthRPCArgs: AuthRPCArgs{
AuthToken: reply.AuthToken,
},
TmpFileName: tmpFileName,
Buf: buf,
}
err = adminServer.WriteTmpConfig(&wArgs, &WriteConfigReply{})
if err != nil {
t.Fatalf("Failed to write temporary config %v", err)
}
if err != nil {
t.Errorf("Expected to succeed but failed %v", err)
}
cArgs := CommitConfigArgs{
AuthRPCArgs: AuthRPCArgs{
AuthToken: reply.AuthToken,
},
FileName: tmpFileName,
}
cReply := CommitConfigReply{}
err = adminServer.CommitConfig(&cArgs, &cReply)
if err != nil {
t.Fatalf("Failed to commit config file %v", err)
}
}


@@ -56,6 +56,8 @@ const (
ErrInvalidBucketName
ErrInvalidDigest
ErrInvalidRange
ErrInvalidCopyPartRange
ErrInvalidCopyPartRangeSource
ErrInvalidMaxKeys
ErrInvalidMaxUploads
ErrInvalidMaxParts
@@ -137,6 +139,7 @@ const (
ErrObjectExistsAsDirectory
ErrPolicyNesting
ErrInvalidObjectName
ErrInvalidResourceName
ErrServerNotInitialized
// Add new extended error codes here.
// Please open a https://github.com/minio/minio/issues before adding
@@ -144,6 +147,7 @@ const (
ErrAdminInvalidAccessKey
ErrAdminInvalidSecretKey
ErrAdminConfigNoQuorum
)
// error code to APIError structure, these fields carry respective
@@ -539,6 +543,16 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Configurations overlap. Configurations on the same bucket cannot share a common event type.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopyPartRange: {
Code: "InvalidArgument",
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopyPartRangeSource: {
Code: "InvalidArgument",
Description: "Range specified is not valid for source object",
HTTPStatusCode: http.StatusBadRequest,
},
/// S3 extensions.
ErrContentSHA256Mismatch: {
@@ -575,7 +589,12 @@ var errorCodeResponse = map[APIErrorCode]APIError{
},
ErrInvalidObjectName: {
Code: "XMinioInvalidObjectName",
Description: "Object name contains unsupported characters. Unsupported characters are `^*|\\\"",
Description: "Object name contains unsupported characters.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidResourceName: {
Code: "XMinioInvalidResourceName",
Description: "Resource name contains bad components such as \"..\" or \".\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrServerNotInitialized: {
@@ -593,6 +612,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The secret key is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigNoQuorum: {
Code: "XMinioAdminConfigNoQuorum",
Description: "Configuration update failed because server quorum was not met",
HTTPStatusCode: http.StatusServiceUnavailable,
},
// Add your error structure here.
}
@@ -674,6 +698,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooLarge
case ObjectTooSmall:
apiErr = ErrEntityTooSmall
case NotImplemented:
apiErr = ErrNotImplemented
default:
apiErr = ErrInternalError
}


@@ -103,6 +103,10 @@ func TestAPIErrCode(t *testing.T) {
StorageFull{},
ErrStorageFull,
},
{
NotImplemented{},
ErrNotImplemented,
},
{
errSignatureMismatch,
ErrSignatureDoesNotMatch,


@@ -166,12 +166,13 @@ type ListBucketsResponse struct {
// Upload container for in progress multipart upload
type Upload struct {
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass string
Initiated string
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass string
Initiated string
HealUploadInfo *HealObjectInfo `xml:"HealObjectInfo,omitempty"`
}
// CommonPrefix container for prefix response in ListObjectsResponse
@@ -488,6 +489,7 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
newUpload.UploadID = upload.UploadID
newUpload.Key = upload.Object
newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
newUpload.HealUploadInfo = upload.HealUploadInfo
listMultipartUploadsResponse.Uploads[index] = newUpload
}
return listMultipartUploadsResponse


@@ -63,7 +63,9 @@ func isRequestPostPolicySignatureV4(r *http.Request) bool {
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
func isRequestSignStreamingV4(r *http.Request) bool {
return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 && r.Method == httpPUT
return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 &&
r.Header.Get("content-encoding") == streamingContentEncoding &&
r.Method == httpPUT
}
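
After this change a streaming upload must carry both markers. A sketch of a request that satisfies the check, assuming the conventional AWS values for the two constants ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" and "aws-chunked"); their definitions are not shown in this diff:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	req, _ := http.NewRequest("PUT", "http://localhost:9000/mybucket/myobject",
		strings.NewReader("chunked payload goes here"))
	// Assumed values of streamingContentSHA256 and streamingContentEncoding.
	req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	req.Header.Set("Content-Encoding", "aws-chunked")
	fmt.Println(req.Method, req.Header.Get("X-Amz-Content-Sha256"))
}
```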
// Authorization type.


@@ -43,6 +43,7 @@ func TestGetRequestAuthType(t *testing.T) {
Header: http.Header{
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
"X-Amz-Content-Sha256": []string{streamingContentSHA256},
"Content-Encoding": []string{streamingContentEncoding},
},
Method: "PUT",
},
@@ -315,7 +316,11 @@ func TestIsReqAuthenticated(t *testing.T) {
}
defer removeAll(path)
creds := newCredentialWithKeys("myuser", "mypassword")
creds, err := createCredential("myuser", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
serverConfig.SetCredential(creds)
// List of test cases for validating http request authentication.

View File

@@ -111,13 +111,13 @@ func (authClient *AuthRPCClient) Login() (err error) {
// call makes a RPC call after logs into the server.
func (authClient *AuthRPCClient) call(serviceMethod string, args interface {
SetAuthToken(authToken string)
SetRequestTime(requestTime time.Time)
}, reply interface{}) (err error) {
// On successful login, execute RPC call.
if err = authClient.Login(); err == nil {
authClient.Lock()
// Set token and timestamp before the rpc call.
args.SetAuthToken(authClient.authToken)
args.SetRequestTime(time.Now().UTC())
authClient.Unlock()
// Do RPC call.
err = authClient.rpcClient.Call(serviceMethod, args, reply)
@@ -128,7 +128,6 @@ func (authClient *AuthRPCClient) call(serviceMethod string, args interface {
// Call executes RPC call till success or globalAuthRPCRetryThreshold on ErrShutdown.
func (authClient *AuthRPCClient) Call(serviceMethod string, args interface {
SetAuthToken(authToken string)
SetRequestTime(requestTime time.Time)
}, reply interface{}) (err error) {
// Done channel is used to close any lingering retry routine, as soon

cmd/azure-anonymous.go (new file, 185 lines)

@@ -0,0 +1,185 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
)
// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
if err != nil {
return bucketInfo, azureToObjectError(traceError(err))
}
url.RawQuery = "restype=container"
resp, err := http.Head(url.String())
if err != nil {
return bucketInfo, azureToObjectError(traceError(err), bucket)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return bucketInfo, traceError(err)
}
bucketInfo = BucketInfo{
Name: bucket,
Created: t,
}
return bucketInfo, nil
}
// AnonGetObject - SendGET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
url := a.client.GetBlobURL(bucket, object)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}
if length > 0 && startOffset > 0 {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
} else if startOffset > 0 {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
_, err = io.Copy(writer, resp.Body)
return traceError(err)
}
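
AnonGetObject maps (startOffset, length) onto an inclusive HTTP Range header, so startOffset=100 with length=50 becomes `bytes=100-149`. A small sketch of that arithmetic, mirroring the branches above:

```go
package main

import "fmt"

// rangeHeader mirrors the Range construction in AnonGetObject above.
func rangeHeader(startOffset, length int64) string {
	if length > 0 && startOffset > 0 {
		// Inclusive end offset: first byte + count - 1.
		return fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)
	}
	if startOffset > 0 {
		return fmt.Sprintf("bytes=%d-", startOffset) // read to end of object
	}
	return "" // full object, no Range header sent
}

func main() {
	fmt.Println(rangeHeader(100, 50)) // bytes=100-149
	fmt.Println(rangeHeader(100, 0))  // bytes=100-
}
```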
// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
resp, err := http.Head(a.client.GetBlobURL(bucket, object))
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
var contentLength int64
contentLengthStr := resp.Header.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil {
return objInfo, azureToObjectError(traceError(errUnexpected), bucket, object)
}
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return objInfo, traceError(err)
}
objInfo.ModTime = t
objInfo.Bucket = bucket
objInfo.UserDefined = make(map[string]string)
if resp.Header.Get("Content-Encoding") != "" {
objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
}
objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
objInfo.MD5Sum = resp.Header.Get("Etag")
objInfo.Name = object
objInfo.Size = contentLength
return
}
// AnonListObjects - Use Azure equivalent ListBlobs.
func (a AzureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
params := storage.ListBlobsParameters{
Prefix: prefix,
Marker: marker,
Delimiter: delimiter,
MaxResults: uint(maxKeys),
}
q := azureListBlobsGetParameters(params)
q.Set("restype", "container")
q.Set("comp", "list")
url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
if err != nil {
return result, azureToObjectError(traceError(err))
}
url.RawQuery = q.Encode()
resp, err := http.Get(url.String())
if err != nil {
return result, azureToObjectError(traceError(err))
}
defer resp.Body.Close()
var listResp storage.BlobListResponse
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, azureToObjectError(traceError(err))
}
err = xml.Unmarshal(data, &listResp)
if err != nil {
return result, azureToObjectError(traceError(err))
}
result.IsTruncated = listResp.NextMarker != ""
result.NextMarker = listResp.NextMarker
for _, object := range listResp.Blobs {
t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
if e != nil {
continue
}
result.Objects = append(result.Objects, ObjectInfo{
Bucket: bucket,
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
MD5Sum: object.Properties.Etag,
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
}
result.Prefixes = listResp.BlobPrefixes
return result, nil
}
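// Illustration (editor's sketch; the account name is a placeholder): for
// bucket "photos" and prefix "2017/" the request URL resembles
//
//   https://myaccount.blob.core.windows.net/photos?comp=list&prefix=2017%2F&restype=container
//
// i.e. the container-level List Blobs REST call, issued anonymously.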

cmd/azure-unsupported.go Normal file

@@ -0,0 +1,43 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// HealBucket - Not relevant.
func (a AzureObjects) HealBucket(bucket string) error {
return traceError(NotImplemented{})
}
// ListBucketsHeal - Not relevant.
func (a AzureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
return nil, traceError(NotImplemented{})
}
// HealObject - Not relevant.
func (a AzureObjects) HealObject(bucket, object string) error {
return traceError(NotImplemented{})
}
// ListObjectsHeal - Not relevant.
func (a AzureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (a AzureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
}

cmd/azure.go Normal file

@@ -0,0 +1,637 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/sha256-simd"
)
const globalAzureAPIVersion = "2016-05-31"
// To store metadata during NewMultipartUpload which will be used after
// CompleteMultipartUpload to call SetBlobProperties.
type azureMultipartMetaInfo struct {
meta map[string]map[string]string
*sync.Mutex
}
// Return metadata map of the multipart object.
func (a *azureMultipartMetaInfo) get(key string) map[string]string {
a.Lock()
defer a.Unlock()
return a.meta[key]
}
// Set metadata map for the multipart object.
func (a *azureMultipartMetaInfo) set(key string, value map[string]string) {
a.Lock()
defer a.Unlock()
a.meta[key] = value
}
// Delete metadata map for the multipart object.
func (a *azureMultipartMetaInfo) del(key string) {
a.Lock()
defer a.Unlock()
delete(a.meta, key)
}
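// Lifecycle note (editor's): NewMultipartUpload stores the (canonicalized)
// metadata map under the upload ID, PutObjectPart/ListObjectParts consult it
// to validate the upload, and CompleteMultipartUpload/AbortMultipartUpload
// delete it. The map lives only in memory, so a server restart drops all
// in-progress upload state (see ListMultipartUploads below).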
// AzureObjects - Implements Object layer for Azure blob storage.
type AzureObjects struct {
client storage.BlobStorageClient // Azure sdk client
metaInfo azureMultipartMetaInfo
}
// Convert azure errors to minio object layer errors.
func azureToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
e, ok := err.(*Error)
if !ok {
// The caller should wrap errors with traceError() before calling this function;
// handling untraced errors here would make this function needlessly complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
azureErr, ok := err.(storage.AzureStorageServiceError)
if !ok {
// We don't interpret non-Azure errors; Azure errors carry a StatusCode
// that helps us convert them to object-layer errors.
return e
}
switch azureErr.Code {
case "ContainerAlreadyExists":
err = BucketExists{Bucket: bucket}
case "InvalidResourceName":
err = BucketNameInvalid{Bucket: bucket}
default:
switch azureErr.StatusCode {
case http.StatusNotFound:
if object != "" {
err = ObjectNotFound{bucket, object}
} else {
err = BucketNotFound{Bucket: bucket}
}
case http.StatusBadRequest:
err = BucketNameInvalid{Bucket: bucket}
}
}
e.e = err
return e
}
// Inits azure blob storage client and returns AzureObjects.
func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLayer, error) {
if endPoint == "" {
endPoint = storage.DefaultBaseURL
}
c, err := storage.NewClient(account, key, endPoint, globalAzureAPIVersion, secure)
if err != nil {
return AzureObjects{}, err
}
return &AzureObjects{
client: c.GetBlobService(),
metaInfo: azureMultipartMetaInfo{
meta: make(map[string]map[string]string),
Mutex: &sync.Mutex{},
},
}, nil
}
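// Example (editor's sketch; account and key are placeholders):
//
//   layer, err := newAzureLayer("", "myaccount", "mykey", true)
//
// connects to the default Azure endpoint (storage.DefaultBaseURL) over HTTPS
// using API version 2016-05-31.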
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a AzureObjects) Shutdown() error {
// TODO
return nil
}
// StorageInfo - Not relevant to Azure backend.
func (a AzureObjects) StorageInfo() StorageInfo {
return StorageInfo{}
}
// MakeBucket - Create a new container on azure backend.
func (a AzureObjects) MakeBucket(bucket string) error {
err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
return azureToObjectError(traceError(err), bucket)
}
// GetBucketInfo - Get bucket metadata.
func (a AzureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
// Azure does not have an equivalent call, hence use ListContainers.
resp, err := a.client.ListContainers(storage.ListContainersParameters{
Prefix: bucket,
})
if err != nil {
return BucketInfo{}, azureToObjectError(traceError(err), bucket)
}
for _, container := range resp.Containers {
if container.Name == bucket {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e == nil {
return BucketInfo{
Name: bucket,
Created: t,
}, nil
} // else continue
}
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a AzureObjects) ListBuckets() (buckets []BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil {
return nil, azureToObjectError(traceError(err))
}
for _, container := range resp.Containers {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e != nil {
return nil, traceError(e)
}
buckets = append(buckets, BucketInfo{
Name: container.Name,
Created: t,
})
}
return buckets, nil
}
// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a AzureObjects) DeleteBucket(bucket string) error {
return azureToObjectError(traceError(a.client.DeleteContainer(bucket)), bucket)
}
// ListObjects - lists all blobs on azure within a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs.
func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
Prefix: prefix,
Marker: marker,
Delimiter: delimiter,
MaxResults: uint(maxKeys),
})
if err != nil {
return result, azureToObjectError(traceError(err), bucket, prefix)
}
result.IsTruncated = resp.NextMarker != ""
result.NextMarker = resp.NextMarker
for _, object := range resp.Blobs {
t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
if e != nil {
continue
}
result.Objects = append(result.Objects, ObjectInfo{
Bucket: bucket,
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
MD5Sum: canonicalizeETag(object.Properties.Etag),
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
}
result.Prefixes = resp.BlobPrefixes
return result, nil
}
// GetObject - reads an object from azure. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total number of bytes to read; zero means read until the end of the object.
func (a AzureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
byteRange := fmt.Sprintf("%d-", startOffset)
if length > 0 && startOffset > 0 {
byteRange = fmt.Sprintf("%d-%d", startOffset, startOffset+length-1)
}
var rc io.ReadCloser
var err error
if startOffset == 0 && length == 0 {
rc, err = a.client.GetBlob(bucket, object)
} else {
rc, err = a.client.GetBlobRange(bucket, object, byteRange, nil)
}
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}
_, err = io.Copy(writer, rc)
rc.Close()
return traceError(err)
}
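// Worked example (editor's note): startOffset=100, length=50 yields the byte
// range "100-149" served via GetBlobRange; a full read (both zero) goes
// through GetBlob. With startOffset=0 and a non-zero length the range stays
// at its default "0-", i.e. the whole blob is still fetched.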
// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
// uses Azure equivalent GetBlobProperties.
func (a AzureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
prop, err := a.client.GetBlobProperties(bucket, object)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
t, err := time.Parse(time.RFC1123, prop.LastModified)
if err != nil {
return objInfo, traceError(err)
}
objInfo = ObjectInfo{
Bucket: bucket,
UserDefined: make(map[string]string),
MD5Sum: canonicalizeETag(prop.Etag),
ModTime: t,
Name: object,
Size: prop.ContentLength,
}
if prop.ContentEncoding != "" {
objInfo.UserDefined["Content-Encoding"] = prop.ContentEncoding
}
objInfo.UserDefined["Content-Type"] = prop.ContentType
return objInfo, nil
}
// Canonicalize the metadata headers; without this the azure-sdk computes an
// incorrect signature. Canonicalizing converts any HTTP header of the form
// `accept-encoding` to its canonical form `Accept-Encoding`.
func canonicalMetadata(metadata map[string]string) (canonical map[string]string) {
canonical = make(map[string]string)
for k, v := range metadata {
canonical[http.CanonicalHeaderKey(k)] = v
}
return canonical
}
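// For example, canonicalMetadata(map[string]string{"accept-encoding": "gzip"})
// returns map[string]string{"Accept-Encoding": "gzip"} (see TestCanonicalMetadata
// in azure_test.go).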
// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a AzureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
var sha256Writer hash.Hash
teeReader := data
if sha256sum != "" {
sha256Writer = sha256.New()
teeReader = io.TeeReader(data, sha256Writer)
}
delete(metadata, "md5Sum")
err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, canonicalMetadata(metadata))
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
a.client.DeleteBlob(bucket, object, nil)
return ObjectInfo{}, traceError(SHA256Mismatch{})
}
}
return a.GetObjectInfo(bucket, object)
}
// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API.
func (a AzureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
err = a.client.CopyBlob(destBucket, destObject, a.client.GetBlobURL(srcBucket, srcObject))
if err != nil {
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
}
return a.GetObjectInfo(destBucket, destObject)
}
// DeleteObject - Deletes a blob on azure container, uses Azure
// equivalent DeleteBlob API.
func (a AzureObjects) DeleteObject(bucket, object string) error {
err := a.client.DeleteBlob(bucket, object, nil)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}
return nil
}
// ListMultipartUploads - Incomplete implementation; for now just return the prefix if it is an incomplete upload.
// FIXME: Full ListMultipartUploads is not supported yet. It is supported just enough for our client
// libraries to support re-uploads. a.client.ListBlobs() can be made to return entries that include
// uncommitted blobs, from which the committed blobs must be filtered out to obtain the uncommitted list.
func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
result.MaxUploads = maxUploads
result.Prefix = prefix
result.Delimiter = delimiter
meta := a.metaInfo.get(prefix)
if meta == nil {
// In case minio was restarted after NewMultipartUpload and before CompleteMultipartUpload we expect
// the client to do a fresh upload so that any metadata like content-type are sent again in the
// NewMultipartUpload.
return result, nil
}
result.Uploads = []uploadMetadata{{prefix, prefix, time.Now().UTC(), "", nil}}
return result, nil
}
// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
func (a AzureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
// Azure doesn't return a unique upload ID, so we use the object name in its place. Azure allows multiple
// uploads to co-exist as long as the user keeps the uploaded blocks (in block blobs) unique amongst
// concurrent upload attempts. Each concurrent client keeps its own blockID list which it can commit.
uploadID = object
if metadata == nil {
// Store an empty map as a placeholder else ListObjectParts/PutObjectPart will not work properly.
metadata = make(map[string]string)
} else {
metadata = canonicalMetadata(metadata)
}
a.metaInfo.set(uploadID, metadata)
return uploadID, nil
}
// CopyObjectPart - Not implemented.
func (a AzureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
return info, traceError(NotImplemented{})
}
// Encode partID+md5Hex to a blockID.
func azureGetBlockID(partID int, md5Hex string) string {
return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%.5d.%s", partID, md5Hex)))
}
// Decode blockID to partID+md5Hex.
func azureParseBlockID(blockID string) (int, string, error) {
idByte, err := base64.StdEncoding.DecodeString(blockID)
if err != nil {
return 0, "", traceError(err)
}
idStr := string(idByte)
splitRes := strings.Split(idStr, ".")
if len(splitRes) != 2 {
return 0, "", traceError(errUnexpected)
}
partID, err := strconv.Atoi(splitRes[0])
if err != nil {
return 0, "", traceError(err)
}
return partID, splitRes[1], nil
}
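// Round-trip example (taken from the unit tests below): azureGetBlockID(1,
// "d41d8cd98f00b204e9800998ecf8427e") base64-encodes
// "00001.d41d8cd98f00b204e9800998ecf8427e" into
// "MDAwMDEuZDQxZDhjZDk4ZjAwYjIwNGU5ODAwOTk4ZWNmODQyN2U=", and
// azureParseBlockID reverses the mapping.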
// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a AzureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
if meta := a.metaInfo.get(uploadID); meta == nil {
return info, traceError(InvalidUploadID{})
}
var sha256Writer hash.Hash
teeReader := data
if sha256sum != "" {
sha256Writer = sha256.New()
// Tee through the hasher only when a checksum was supplied; io.TeeReader
// with a nil writer would panic on the first Read.
teeReader = io.TeeReader(data, sha256Writer)
}
id := azureGetBlockID(partID, md5Hex)
err = a.client.PutBlockWithLength(bucket, object, id, uint64(size), teeReader, nil)
if err != nil {
return info, azureToObjectError(traceError(err), bucket, object)
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return PartInfo{}, traceError(SHA256Mismatch{})
}
}
info.PartNumber = partID
info.ETag = md5Hex
info.LastModified = time.Now().UTC()
info.Size = size
return info, nil
}
// ListObjectParts - Use Azure equivalent GetBlockList.
func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
result.Bucket = bucket
result.Object = object
result.UploadID = uploadID
result.MaxParts = maxParts
if meta := a.metaInfo.get(uploadID); meta == nil {
return result, nil
}
resp, err := a.client.GetBlockList(bucket, object, storage.BlockListTypeUncommitted)
if err != nil {
return result, azureToObjectError(traceError(err), bucket, object)
}
tmpMaxParts := 0
partCount := 0 // Used for figuring out IsTruncated.
nextPartNumberMarker := 0
for _, part := range resp.UncommittedBlocks {
if tmpMaxParts == maxParts {
// Also takes care of the case if maxParts = 0
break
}
partCount++
partID, md5Hex, err := azureParseBlockID(part.Name)
if err != nil {
return result, err
}
if partID <= partNumberMarker {
continue
}
result.Parts = append(result.Parts, PartInfo{
partID,
time.Now().UTC(),
md5Hex,
part.Size,
})
tmpMaxParts++
nextPartNumberMarker = partID
}
if partCount < len(resp.UncommittedBlocks) {
result.IsTruncated = true
result.NextPartNumberMarker = nextPartNumberMarker
}
return result, nil
}
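// Worked example (editor's note): with uncommitted blocks for parts 1 and 2,
// maxParts=1 and partNumberMarker=0, the loop emits part 1 and breaks; since
// partCount (1) is less than the total number of blocks (2), the result is
// marked truncated with NextPartNumberMarker=1, and the caller resumes from
// there.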
// AbortMultipartUpload - Not Implemented.
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week.
func (a AzureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
a.metaInfo.del(uploadID)
return nil
}
// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a AzureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
meta := a.metaInfo.get(uploadID)
if meta == nil {
return objInfo, traceError(InvalidUploadID{uploadID})
}
var blocks []storage.Block
for _, part := range uploadedParts {
blocks = append(blocks, storage.Block{
ID: azureGetBlockID(part.PartNumber, part.ETag),
Status: storage.BlockStatusUncommitted,
})
}
err = a.client.PutBlockList(bucket, object, blocks)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
if len(meta) > 0 {
prop := storage.BlobHeaders{
ContentMD5: meta["Content-Md5"],
ContentLanguage: meta["Content-Language"],
ContentEncoding: meta["Content-Encoding"],
ContentType: meta["Content-Type"],
CacheControl: meta["Cache-Control"],
}
err = a.client.SetBlobProperties(bucket, object, prop)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
}
a.metaInfo.del(uploadID)
return a.GetObjectInfo(bucket, object)
}
func anonErrToObjectErr(statusCode int, params ...string) error {
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
switch statusCode {
case http.StatusNotFound:
if object != "" {
return ObjectNotFound{bucket, object}
}
return BucketNotFound{Bucket: bucket}
case http.StatusBadRequest:
if object != "" {
return ObjectNameInvalid{bucket, object}
}
return BucketNameInvalid{Bucket: bucket}
}
return errUnexpected
}
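// For example, a 404 with both params ("bucket", "object") maps to
// ObjectNotFound, while a 404 with only ("bucket") maps to BucketNotFound;
// a 400 maps to ObjectNameInvalid or BucketNameInvalid analogously.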
// Copied from github.com/Azure/azure-sdk-for-go/storage/blob.go
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Delimiter != "" {
out.Set("delimiter", p.Delimiter)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// SetBucketPolicies - Azure supports three types of container policies:
// storage.ContainerAccessTypeContainer - readonly in minio terminology
// storage.ContainerAccessTypeBlob - readonly without listing in minio terminology
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a AzureObjects) SetBucketPolicies(bucket string, policies []BucketAccessPolicy) error {
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return traceError(NotImplemented{})
}
if policies[0].Prefix != prefix {
return traceError(NotImplemented{})
}
if policies[0].Policy != policy.BucketPolicyReadOnly {
return traceError(NotImplemented{})
}
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer,
AccessPolicies: nil,
}
err := a.client.SetContainerPermissions(bucket, perm, 0, "")
return azureToObjectError(traceError(err), bucket)
}
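// Illustration (editor's sketch) of the only accepted call shape:
//
//   a.SetBucketPolicies("photos", []BucketAccessPolicy{
//       {Prefix: "photos/*", Policy: policy.BucketPolicyReadOnly},
//   })
//
// Any other prefix, policy type, or number of policies returns NotImplemented.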
// GetBucketPolicies - Get the container ACL and convert it to canonical []BucketAccessPolicy
func (a AzureObjects) GetBucketPolicies(bucket string) ([]BucketAccessPolicy, error) {
perm, err := a.client.GetContainerPermissions(bucket, 0, "")
if err != nil {
return nil, azureToObjectError(traceError(err), bucket)
}
switch perm.AccessType {
case storage.ContainerAccessTypePrivate:
return nil, nil
case storage.ContainerAccessTypeContainer:
return []BucketAccessPolicy{{"", policy.BucketPolicyReadOnly}}, nil
}
return nil, azureToObjectError(traceError(NotImplemented{}))
}
// DeleteBucketPolicies - Set the container ACL to "private"
func (a AzureObjects) DeleteBucketPolicies(bucket string) error {
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypePrivate,
AccessPolicies: nil,
}
err := a.client.SetContainerPermissions(bucket, perm, 0, "")
return azureToObjectError(traceError(err))
}

cmd/azure_test.go Normal file

@@ -0,0 +1,142 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/storage"
)
// Test canonical metadata.
func TestCanonicalMetadata(t *testing.T) {
metadata := map[string]string{
"accept-encoding": "gzip",
"content-encoding": "gzip",
}
expectedCanonicalM := map[string]string{
"Accept-Encoding": "gzip",
"Content-Encoding": "gzip",
}
actualCanonicalM := canonicalMetadata(metadata)
if !reflect.DeepEqual(actualCanonicalM, expectedCanonicalM) {
t.Fatalf("Test failed, expected %#v, got %#v", expectedCanonicalM, actualCanonicalM)
}
}
// Tests for azureToObjectError().
func TestAzureToObjectError(t *testing.T) {
testCases := []struct {
actualErr error
expectedErr error
bucket, object string
}{
{
nil, nil, "", "",
},
{
traceError(errUnexpected), errUnexpected, "", "",
},
{
traceError(errUnexpected), traceError(errUnexpected), "", "",
},
{
traceError(storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}), BucketExists{Bucket: "bucket"}, "bucket", "",
},
{
traceError(storage.AzureStorageServiceError{
Code: "InvalidResourceName",
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
{
traceError(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), ObjectNotFound{
Bucket: "bucket",
Object: "object",
}, "bucket", "object",
},
{
traceError(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), BucketNotFound{Bucket: "bucket"}, "bucket", "",
},
{
traceError(storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest,
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
}
for i, testCase := range testCases {
err := azureToObjectError(testCase.actualErr, testCase.bucket, testCase.object)
if err != nil {
if err.Error() != testCase.expectedErr.Error() {
t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err)
}
}
}
}
// Test azureGetBlockID().
func TestAzureGetBlockID(t *testing.T) {
testCases := []struct {
partID int
md5 string
blockID string
}{
{1, "d41d8cd98f00b204e9800998ecf8427e", "MDAwMDEuZDQxZDhjZDk4ZjAwYjIwNGU5ODAwOTk4ZWNmODQyN2U="},
{2, "a7fb6b7b36ee4ed66b5546fac4690273", "MDAwMDIuYTdmYjZiN2IzNmVlNGVkNjZiNTU0NmZhYzQ2OTAyNzM="},
}
for _, test := range testCases {
blockID := azureGetBlockID(test.partID, test.md5)
if blockID != test.blockID {
t.Fatalf("%s is not equal to %s", blockID, test.blockID)
}
}
}
// Test azureParseBlockID().
func TestAzureParseBlockID(t *testing.T) {
testCases := []struct {
partID int
md5 string
blockID string
}{
{1, "d41d8cd98f00b204e9800998ecf8427e", "MDAwMDEuZDQxZDhjZDk4ZjAwYjIwNGU5ODAwOTk4ZWNmODQyN2U="},
{2, "a7fb6b7b36ee4ed66b5546fac4690273", "MDAwMDIuYTdmYjZiN2IzNmVlNGVkNjZiNTU0NmZhYzQ2OTAyNzM="},
}
for _, test := range testCases {
partID, md5, err := azureParseBlockID(test.blockID)
if err != nil {
t.Fatal(err)
}
if partID != test.partID {
t.Fatalf("%d not equal to %d", partID, test.partID)
}
if md5 != test.md5 {
t.Fatalf("%s not equal to %s", md5, test.md5)
}
}
_, _, err := azureParseBlockID("junk")
if err == nil {
t.Fatal("Expected azureParseBlockID() to return error")
}
}


@@ -28,25 +28,9 @@ import (
humanize "github.com/dustin/go-humanize"
)
// Prepare benchmark backend
// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
switch instanceType {
// Total number of disks for FS backend is set to 1.
case FSTestStr:
obj, disk, err := prepareFS()
if err != nil {
return nil, nil, err
}
return obj, []string{disk}, nil
// Total number of disks for XL backend is set to 16.
case XLTestStr:
return prepareXL()
}
obj, disk, err := prepareFS()
if err != nil {
return nil, nil, err
}
return obj, []string{disk}, nil
return prepareTestBackend(instanceType)
}
// Benchmark utility functions for ObjectLayer.PutObject().


@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
package cmd
import (
"fmt"
"path"
"sync"
"time"
@@ -63,8 +64,8 @@ func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthR
return err
}
if err := validateAuthKeys(args.Creds.AccessKey, args.Creds.SecretKey); err != nil {
return err
if !args.Creds.IsValid() {
return fmt.Errorf("Invalid credential passed")
}
// Update credentials in memory
@@ -111,7 +112,7 @@ func updateCredsOnPeers(creds credential) map[string]error {
secretKey: serverCred.SecretKey,
serverAddr: peers[ix],
secureConn: globalIsSSL,
serviceEndpoint: path.Join(reservedBucket, browserPeerPath),
serviceEndpoint: path.Join(minioReservedBucketPath, browserPeerPath),
serviceName: "BrowserPeer",
})


@@ -36,7 +36,7 @@ func (s *TestRPCBrowserPeerSuite) SetUpSuite(c *testing.T) {
serverAddr: s.testServer.Server.Listener.Addr().String(),
accessKey: s.testServer.AccessKey,
secretKey: s.testServer.SecretKey,
serviceEndpoint: path.Join(reservedBucket, browserPeerPath),
serviceEndpoint: path.Join(minioReservedBucketPath, browserPeerPath),
serviceName: "BrowserPeer",
}
}
@@ -63,9 +63,9 @@ func TestBrowserPeerRPC(t *testing.T) {
// Tests for browser peer rpc.
func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) {
// Construct RPC call arguments.
creds := credential{
AccessKey: "abcd1",
SecretKey: "abcd1234",
creds, err := createCredential("abcd1", "abcd1234")
if err != nil {
t.Fatalf("unable to create credential. %v", err)
}
// Validate for invalid token.
@@ -73,7 +73,7 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) {
args.AuthToken = "garbage"
rclient := newRPCClient(s.testAuthConf.serverAddr, s.testAuthConf.serviceEndpoint, false)
defer rclient.Close()
err := rclient.Call("BrowserPeer.SetAuthPeer", &args, &AuthRPCReply{})
err = rclient.Call("BrowserPeer.SetAuthPeer", &args, &AuthRPCReply{})
if err != nil {
if err.Error() != errInvalidToken.Error() {
t.Fatal(err)


@@ -45,7 +45,7 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error {
return traceError(err)
}
bpRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()
bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
bpRouter.Path(browserPeerPath).Handler(bpRPCServer)
return nil
}


@@ -19,7 +19,6 @@ package cmd
import (
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
@@ -50,6 +49,10 @@ func enforceBucketPolicy(bucket, action, resource, referer string, queryParams u
return ErrInternalError
}
if globalBucketPolicies == nil {
return ErrAccessDenied
}
// Fetch bucket policy, if policy is not set return access denied.
policy := globalBucketPolicies.GetBucketPolicy(bucket)
if policy == nil {
@@ -86,10 +89,7 @@ func isBucketActionAllowed(action, bucket, prefix string) bool {
resource := bucketARNPrefix + path.Join(bucket, prefix)
var conditionKeyMap map[string]set.StringSet
// Validate action, resource and conditions with current policy statements.
if !bucketPolicyEvalStatements(action, resource, conditionKeyMap, policy.Statements) {
return false
}
return true
return bucketPolicyEvalStatements(action, resource, conditionKeyMap, policy.Statements)
}
// GetBucketLocationHandler - GET Bucket location.
@@ -277,7 +277,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
for index, object := range deleteObjects.Objects {
wg.Add(1)
go func(i int, obj ObjectIdentifier) {
objectLock := globalNSMutex.NewNSLock(bucket, obj.ObjectName)
objectLock.Lock()
defer objectLock.Unlock()
defer wg.Done()
dErr := objectAPI.DeleteObject(bucket, obj.ObjectName)
if dErr != nil {
dErrs[i] = dErr
@@ -326,9 +330,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
ObjInfo: ObjectInfo{
Name: dobj.ObjectName,
},
ReqParams: map[string]string{
"sourceIPAddress": r.RemoteAddr,
},
ReqParams: extractReqParams(r),
})
}
}
@@ -438,14 +440,25 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
defer fileBody.Close()
bucket := mux.Vars(r)["bucket"]
formValues["Bucket"] = bucket
formValues.Set("Bucket", bucket)
if fileName != "" && strings.Contains(formValues["Key"], "${filename}") {
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues["Key"] = strings.Replace(formValues["Key"], "${filename}", fileName, -1)
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
}
object := formValues.Get("Key")
successRedirect := formValues.Get("success_action_redirect")
successStatus := formValues.Get("success_action_status")
var redirectURL *url.URL
if successRedirect != "" {
redirectURL, err = url.Parse(successRedirect)
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}
}
object := formValues["Key"]
// Verify policy signature.
apiErr := doesPolicySignatureMatch(formValues)
@@ -454,7 +467,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
policyBytes, err := base64.StdEncoding.DecodeString(formValues["Policy"])
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
@@ -482,7 +495,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if fileSize > lengthRange.Max || fileSize > maxObjectSize {
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
errorIf(err, "Unable to create object.")
writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
return
@@ -491,7 +504,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Extract metadata to be saved from received Form.
metadata := extractMetadataFromForm(formValues)
sha256sum := ""
objectLock := globalNSMutex.NewNSLock(bucket, object)
@@ -504,50 +516,40 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
w.Header().Set("ETag", `"`+objInfo.MD5Sum+`"`)
w.Header().Set("Location", getObjectLocation(bucket, object))
successRedirect := formValues[http.CanonicalHeaderKey("success_action_redirect")]
successStatus := formValues[http.CanonicalHeaderKey("success_action_status")]
// Notify object created event.
defer eventNotify(eventData{
Type: ObjectCreatedPost,
Bucket: objInfo.Bucket,
ObjInfo: objInfo,
ReqParams: extractReqParams(r),
})
if successStatus == "" && successRedirect == "" {
writeSuccessNoContent(w)
} else {
if successRedirect != "" {
redirectURL := successRedirect + "?" + fmt.Sprintf("bucket=%s&key=%s&etag=%s",
bucket,
getURLEncodedName(object),
getURLEncodedName("\""+objInfo.MD5Sum+"\""))
writeRedirectSeeOther(w, redirectURL)
} else {
// Decide what http response to send depending on success_action_status parameter
switch successStatus {
case "201":
resp := encodeResponse(PostResponse{
Bucket: bucket,
Key: object,
ETag: "\"" + objInfo.MD5Sum + "\"",
Location: getObjectLocation(bucket, object),
})
writeResponse(w, http.StatusCreated, resp, "application/xml")
case "200":
writeSuccessResponseHeadersOnly(w)
default:
writeSuccessNoContent(w)
}
}
if successRedirect != "" {
// Replace raw query params..
redirectURL.RawQuery = getRedirectPostRawQuery(objInfo)
writeRedirectSeeOther(w, redirectURL.String())
return
}
// Notify object created event.
eventNotify(eventData{
Type: ObjectCreatedPost,
Bucket: bucket,
ObjInfo: objInfo,
ReqParams: map[string]string{
"sourceIPAddress": r.RemoteAddr,
},
})
// Decide what http response to send depending on success_action_status parameter
switch successStatus {
case "201":
resp := encodeResponse(PostResponse{
Bucket: objInfo.Bucket,
Key: objInfo.Name,
ETag: `"` + objInfo.MD5Sum + `"`,
Location: getObjectLocation(objInfo.Bucket, objInfo.Name),
})
writeResponse(w, http.StatusCreated, resp, "application/xml")
case "200":
writeSuccessResponseHeadersOnly(w)
default:
writeSuccessNoContent(w)
}
}
// HeadBucketHandler - HEAD Bucket


@@ -209,10 +209,7 @@ func writeNotification(w http.ResponseWriter, notification map[string][]Notifica
_, err = w.Write(append(notificationBytes, crlf...))
// Make sure we have flushed, this would set Transfer-Encoding: chunked.
w.(http.Flusher).Flush()
if err != nil {
return err
}
return nil
return err
}
// CRLF character used for chunked transfer in accordance with HTTP standards.


@@ -253,19 +253,19 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
}
sqsType := strings.TrimPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":")
switch {
case strings.HasSuffix(sqsType, queueTypeAMQP):
case hasSuffix(sqsType, queueTypeAMQP):
mSqs.Type = queueTypeAMQP
case strings.HasSuffix(sqsType, queueTypeNATS):
case hasSuffix(sqsType, queueTypeNATS):
mSqs.Type = queueTypeNATS
case strings.HasSuffix(sqsType, queueTypeElastic):
case hasSuffix(sqsType, queueTypeElastic):
mSqs.Type = queueTypeElastic
case strings.HasSuffix(sqsType, queueTypeRedis):
case hasSuffix(sqsType, queueTypeRedis):
mSqs.Type = queueTypeRedis
case strings.HasSuffix(sqsType, queueTypePostgreSQL):
case hasSuffix(sqsType, queueTypePostgreSQL):
mSqs.Type = queueTypePostgreSQL
case strings.HasSuffix(sqsType, queueTypeKafka):
case hasSuffix(sqsType, queueTypeKafka):
mSqs.Type = queueTypeKafka
case strings.HasSuffix(sqsType, queueTypeWebhook):
case hasSuffix(sqsType, queueTypeWebhook):
mSqs.Type = queueTypeWebhook
} // Add more queues here.
mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type)


@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,148 +19,90 @@ package cmd
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// createCertsPath create certs path.
func createCertsPath() error {
certsPath, err := getCertsPath()
if err != nil {
return err
}
if err := os.MkdirAll(certsPath, 0700); err != nil {
return err
}
rootCAsPath := filepath.Join(certsPath, globalMinioCertsCADir)
return os.MkdirAll(rootCAsPath, 0700)
}
// getCertsPath get certs path.
func getCertsPath() (string, error) {
var certsPath string
configDir, err := getConfigPath()
if err != nil {
return "", err
}
certsPath = filepath.Join(configDir, globalMinioCertsDir)
return certsPath, nil
}
// mustGetCertsPath must get certs path.
func mustGetCertsPath() string {
certsPath, err := getCertsPath()
fatalIf(err, "Failed to get certificate path.")
return certsPath
}
// mustGetCertFile must get cert file.
func mustGetCertFile() string {
return filepath.Join(mustGetCertsPath(), globalMinioCertFile)
}
// mustGetKeyFile must get key file.
func mustGetKeyFile() string {
return filepath.Join(mustGetCertsPath(), globalMinioKeyFile)
}
// mustGetCAFiles must get the list of the CA certificates stored in minio config dir
func mustGetCAFiles() (caCerts []string) {
CAsDir := filepath.Join(mustGetCertsPath(), globalMinioCertsCADir)
caFiles, _ := ioutil.ReadDir(CAsDir)
for _, cert := range caFiles {
caCerts = append(caCerts, filepath.Join(CAsDir, cert.Name()))
}
return
}
// mustGetSystemCertPool returns empty cert pool in case of error (windows)
func mustGetSystemCertPool() *x509.CertPool {
pool, err := x509.SystemCertPool()
if err != nil {
return x509.NewCertPool()
}
return pool
}
// isCertFileExists verifies if cert file exists, returns true if
// found, false otherwise.
func isCertFileExists() bool {
st, e := os.Stat(filepath.Join(mustGetCertsPath(), globalMinioCertFile))
// If file exists and is regular return true.
if e == nil && st.Mode().IsRegular() {
return true
}
return false
}
// isKeyFileExists verifies if key file exists, returns true if found,
// false otherwise.
func isKeyFileExists() bool {
st, e := os.Stat(filepath.Join(mustGetCertsPath(), globalMinioKeyFile))
// If file exists and is regular return true.
if e == nil && st.Mode().IsRegular() {
return true
}
return false
}
// isSSL - returns true with both cert and key exists.
func isSSL() bool {
return isCertFileExists() && isKeyFileExists()
return isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())
}
// Reads certificate file and returns a list of parsed certificates.
func parsePublicCertFile(certFile string) (certs []*x509.Certificate, err error) {
var bytes []byte
if bytes, err = ioutil.ReadFile(certFile); err != nil {
return certs, err
}
// Parse all certs in the chain.
var block *pem.Block
var cert *x509.Certificate
current := bytes
for len(current) > 0 {
if block, current = pem.Decode(current); block == nil {
err = fmt.Errorf("Could not read PEM block from file %s", certFile)
return certs, err
}
if cert, err = x509.ParseCertificate(block.Bytes); err != nil {
return certs, err
}
certs = append(certs, cert)
}
if len(certs) == 0 {
err = fmt.Errorf("Empty public certificate file %s", certFile)
}
return certs, err
}
// Reads certificate file and returns a list of parsed certificates.
func readCertificateChain() ([]*x509.Certificate, error) {
bytes, err := ioutil.ReadFile(mustGetCertFile())
return parsePublicCertFile(getPublicCertFile())
}
func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
// Get all CA file names.
var caFiles []string
fis, err := ioutil.ReadDir(certsCAsDir)
if err != nil {
return nil, err
}
// Proceed to parse the certificates.
return parseCertificateChain(bytes)
}
// Parses certificate chain, returns a list of parsed certificates.
func parseCertificateChain(bytes []byte) ([]*x509.Certificate, error) {
var certs []*x509.Certificate
var block *pem.Block
current := bytes
// Parse all certs in the chain.
for len(current) > 0 {
block, current = pem.Decode(current)
if block == nil {
return nil, errors.New("Could not PEM block")
}
// Parse the decoded certificate.
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certs = append(certs, cert)
for _, fi := range fis {
caFiles = append(caFiles, filepath.Join(certsCAsDir, fi.Name()))
}
return certs, nil
}
// loadRootCAs fetches CA files provided in minio config and adds them to globalRootCAs
// Currently under Windows, there is no way to load system + user CAs at the same time
func loadRootCAs() {
caFiles := mustGetCAFiles()
if len(caFiles) == 0 {
return
return nil, nil
}
// Get system cert pool, and empty cert pool under Windows because it is not supported
globalRootCAs = mustGetSystemCertPool()
rootCAs, err := x509.SystemCertPool()
if err != nil {
// In some systems like Windows, system cert pool is not supported.
// Hence we create a new cert pool.
rootCAs = x509.NewCertPool()
}
// Load custom root CAs for client requests
for _, caFile := range caFiles {
caCert, err := ioutil.ReadFile(caFile)
if err != nil {
fatalIf(err, "Unable to load a CA file")
return rootCAs, err
}
globalRootCAs.AppendCertsFromPEM(caCert)
rootCAs.AppendCertsFromPEM(caCert)
}
return rootCAs, nil
}
// loadRootCAs fetches CA files provided in minio config and adds them to globalRootCAs
// Currently under Windows, there is no way to load system + user CAs at the same time
func loadRootCAs() (err error) {
globalRootCAs, err = getRootCAs(getCADir())
return err
}


@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,52 +17,86 @@
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"runtime"
"testing"
)
// Make sure we have a valid certs path.
func TestGetCertsPath(t *testing.T) {
path, err := getCertsPath()
func createTempFile(prefix, content string) (tempFile string, err error) {
var tmpfile *os.File
if tmpfile, err = ioutil.TempFile("", prefix); err != nil {
return tempFile, err
}
if _, err = tmpfile.Write([]byte(content)); err != nil {
return tempFile, err
}
if err = tmpfile.Close(); err != nil {
return tempFile, err
}
tempFile = tmpfile.Name()
return tempFile, err
}
func TestParsePublicCertFile(t *testing.T) {
tempFile1, err := createTempFile("public-cert-file", "")
if err != nil {
t.Error(err)
}
if path == "" {
t.Errorf("expected path to not be an empty string, got: '%s'", path)
}
// Ensure it contains some sort of path separator.
if !strings.ContainsRune(path, os.PathSeparator) {
t.Errorf("expected path to contain file separator")
}
// It should also be an absolute path.
if !filepath.IsAbs(path) {
t.Errorf("expected path to be an absolute path")
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile1)
// This will error if something goes wrong, so just call it.
mustGetCertsPath()
}
// Ensure that the certificate and key file getters contain their respective
// file name and endings.
func TestGetFiles(t *testing.T) {
file := mustGetCertFile()
if !strings.Contains(file, globalMinioCertFile) {
t.Errorf("CertFile does not contain %s", globalMinioCertFile)
tempFile2, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile2)
file = mustGetKeyFile()
if !strings.Contains(file, globalMinioKeyFile) {
t.Errorf("KeyFile does not contain %s", globalMinioKeyFile)
tempFile3, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAabababababaQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
}
defer os.Remove(tempFile3)
// Parses .crt file contents
func TestParseCertificateChain(t *testing.T) {
// given
cert := `-----BEGIN CERTIFICATE-----
tempFile4, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
@@ -77,35 +111,149 @@ JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`
// when
certs, err := parseCertificateChain([]byte(cert))
// then
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Could not parse certificate: %s", err)
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile4)
tempFile5, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAoTBU1pbmlvMQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAoTBU1pbmlvMQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile5)
nonexistentErr := fmt.Errorf("open nonexistent-file: no such file or directory")
if runtime.GOOS == "windows" {
// Below concatenation is done to get rid of goline error
// "error strings should not be capitalized or end with punctuation or a newline"
nonexistentErr = fmt.Errorf("open nonexistent-file:" + " The system cannot find the file specified.")
}
if len(certs) != 1 {
t.Fatalf("Expected number of certificates in chain was 1, actual: %d", len(certs))
testCases := []struct {
certFile string
expectedResultLen int
expectedErr error
}{
{"nonexistent-file", 0, nonexistentErr},
{tempFile1, 0, fmt.Errorf("Empty public certificate file %s", tempFile1)},
{tempFile2, 0, fmt.Errorf("Could not read PEM block from file %s", tempFile2)},
{tempFile3, 0, fmt.Errorf("asn1: structure error: sequence tag mismatch")},
{tempFile4, 1, nil},
{tempFile5, 2, nil},
}
if certs[0].Subject.CommonName != "Minio" {
t.Fatalf("Expected Subject.CommonName was Minio, actual: %s", certs[0].Subject.CommonName)
for _, testCase := range testCases {
certs, err := parsePublicCertFile(testCase.certFile)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
if len(certs) != testCase.expectedResultLen {
t.Fatalf("certs: expected = %v, got = %v", testCase.expectedResultLen, len(certs))
}
}
}
// Parses invalid .crt file contents and returns error
func TestParseInvalidCertificateChain(t *testing.T) {
// given
cert := `This is now valid certificate`
func TestGetRootCAs(t *testing.T) {
emptydir, err := ioutil.TempDir("", "test-get-root-cas")
if err != nil {
t.Fatalf("Unable create temp directory. %v", emptydir)
}
defer os.RemoveAll(emptydir)
// when
_, err := parseCertificateChain([]byte(cert))
dir1, err := ioutil.TempDir("", "test-get-root-cas")
if err != nil {
t.Fatalf("Unable create temp directory. %v", dir1)
}
defer os.RemoveAll(dir1)
if err = os.Mkdir(filepath.Join(dir1, "empty-dir"), 0755); err != nil {
t.Fatalf("Unable create empty dir. %v", err)
}
// then
if err == nil {
t.Fatalf("Expected error but none occurred")
dir2, err := ioutil.TempDir("", "test-get-root-cas")
if err != nil {
t.Fatalf("Unable create temp directory. %v", dir2)
}
defer os.RemoveAll(dir2)
if err = ioutil.WriteFile(filepath.Join(dir2, "empty-file"), []byte{}, 0644); err != nil {
t.Fatalf("Unable create test file. %v", err)
}
nonexistentErr := fmt.Errorf("open nonexistent-dir: no such file or directory")
if runtime.GOOS == "windows" {
// Below concatenation is done to get rid of goline error
// "error strings should not be capitalized or end with punctuation or a newline"
nonexistentErr = fmt.Errorf("open nonexistent-dir:" + " The system cannot find the file specified.")
}
err1 := fmt.Errorf("read %s: is a directory", filepath.Join(dir1, "empty-dir"))
if runtime.GOOS == "windows" {
// Below concatenation is done to get rid of goline error
// "error strings should not be capitalized or end with punctuation or a newline"
err1 = fmt.Errorf("read %s:"+" The handle is invalid.", filepath.Join(dir1, "empty-dir"))
}
testCases := []struct {
certCAsDir string
expectedErr error
}{
{"nonexistent-dir", nonexistentErr},
{dir1, err1},
{emptydir, nil},
{dir2, nil},
}
for _, testCase := range testCases {
_, err := getRootCAs(testCase.certCAsDir)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
}

cmd/config-dir.go Normal file

@@ -0,0 +1,139 @@
/*
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"path/filepath"
"sync"
homedir "github.com/minio/go-homedir"
"github.com/minio/mc/pkg/console"
)
const (
// Default minio configuration directory where below configuration files/directories are stored.
defaultMinioConfigDir = ".minio"
// Minio configuration file.
minioConfigFile = "config.json"
// Directory contains below files/directories for HTTPS configuration.
certsDir = "certs"
// Directory contains all CA certificates other than system defaults for HTTPS.
certsCADir = "CAs"
// Public certificate file for HTTPS.
publicCertFile = "public.crt"
// Private key file for HTTPS.
privateKeyFile = "private.key"
)
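// Resulting on-disk layout with the default configuration directory
// (editor's sketch; "~" stands for the user's home directory):
//
//   ~/.minio/config.json
//   ~/.minio/certs/public.crt
//   ~/.minio/certs/private.key
//   ~/.minio/certs/CAs/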
// ConfigDir - configuration directory with locking.
type ConfigDir struct {
sync.Mutex
dir string
}
// Set - saves given directory as configuration directory.
func (config *ConfigDir) Set(dir string) {
config.Lock()
defer config.Unlock()
config.dir = dir
}
// Get - returns current configuration directory.
func (config *ConfigDir) Get() string {
config.Lock()
defer config.Unlock()
return config.dir
}
func (config *ConfigDir) getCertsDir() string {
return filepath.Join(config.Get(), certsDir)
}
// GetCADir - returns certificate CA directory.
func (config *ConfigDir) GetCADir() string {
return filepath.Join(config.getCertsDir(), certsCADir)
}
// Create - creates configuration directory tree.
func (config *ConfigDir) Create() error {
return mkdirAll(config.GetCADir(), 0700)
}
// GetMinioConfigFile - returns absolute path of config.json file.
func (config *ConfigDir) GetMinioConfigFile() string {
return filepath.Join(config.Get(), minioConfigFile)
}
// GetPublicCertFile - returns absolute path of public.crt file.
func (config *ConfigDir) GetPublicCertFile() string {
return filepath.Join(config.getCertsDir(), publicCertFile)
}
// GetPrivateKeyFile - returns absolute path of private.key file.
func (config *ConfigDir) GetPrivateKeyFile() string {
return filepath.Join(config.getCertsDir(), privateKeyFile)
}
func mustGetDefaultConfigDir() string {
homeDir, err := homedir.Dir()
if err != nil {
console.Fatalln("Unable to get home directory.", err)
}
return filepath.Join(homeDir, defaultMinioConfigDir)
}
var configDir = &ConfigDir{dir: mustGetDefaultConfigDir()}
func setConfigDir(dir string) {
configDir.Set(dir)
}
func getConfigDir() string {
return configDir.Get()
}
func getCADir() string {
return configDir.GetCADir()
}
func createConfigDir() error {
return configDir.Create()
}
func getConfigFile() string {
return configDir.GetMinioConfigFile()
}
func getPublicCertFile() string {
return configDir.GetPublicCertFile()
}
func getPrivateKeyFile() string {
return configDir.GetPrivateKeyFile()
}
func isConfigFileExists() bool {
return isFile(getConfigFile())
}
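Every path helper above derives from the single locked directory, so tests can redirect the whole tree with one call. Illustrative usage from inside package cmd (assuming fmt is imported; the /tmp path is just an example):

```
// Illustrative: all derived paths follow the configured directory.
setConfigDir("/tmp/minio-test")    // e.g. a test sandbox
_ = createConfigDir()              // creates /tmp/minio-test/certs/CAs
fmt.Println(getConfigFile())       // /tmp/minio-test/config.json
fmt.Println(getPublicCertFile())   // /tmp/minio-test/certs/public.crt
fmt.Println(getPrivateKeyFile())   // /tmp/minio-test/certs/private.key
```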

View File

@@ -74,6 +74,10 @@ func migrateConfig() error {
if err := migrateV12ToV13(); err != nil {
return err
}
// Migration version '13' to '14'.
if err := migrateV13ToV14(); err != nil {
return err
}
return nil
}
@@ -90,15 +94,10 @@ func purgeV1() error {
}
if cv1.Version == "1" {
console.Println("Removed unsupported config version 1.")
/// Purge old fsUsers.json file
configPath, err := getConfigPath()
if err != nil {
return fmt.Errorf("Unable to retrieve config path. %v", err)
}
configFile := filepath.Join(configPath, "fsUsers.json")
// Purge old fsUsers.json file
configFile := filepath.Join(getConfigDir(), "fsUsers.json")
removeAll(configFile)
console.Println("Removed unsupported config version 1.")
return nil
}
return fmt.Errorf("Failed to migrate unrecognized config version " + cv1.Version + ".")
@@ -117,13 +116,16 @@ func migrateV2ToV3() error {
if cv2.Version != "2" {
return nil
}
cred, err := createCredential(cv2.Credentials.AccessKey, cv2.Credentials.SecretKey)
if err != nil {
return fmt.Errorf("Invalid credential in V2 configuration file. %v", err)
}
srvConfig := &configV3{}
srvConfig.Version = "3"
srvConfig.Addr = ":9000"
srvConfig.Credential = credential{
AccessKey: cv2.Credentials.AccessKey,
SecretKey: cv2.Credentials.SecretKey,
}
srvConfig.Credential = cred
srvConfig.Region = cv2.Credentials.Region
if srvConfig.Region == "" {
// Region needs to be set for AWS Signature V4.
@@ -154,12 +156,7 @@ func migrateV2ToV3() error {
return fmt.Errorf("Unable to initialize config. %v", err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
// Migrate the config.
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf("Failed to migrate config from "+cv2.Version+" to "+srvConfig.Version+". %v", err)
@@ -201,11 +198,8 @@ func migrateV3ToV4() error {
if err != nil {
return fmt.Errorf("Unable to initialize the quick config. %v", err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf("Failed to migrate config from "+cv3.Version+" to "+srvConfig.Version+". %v", err)
@@ -250,11 +244,8 @@ func migrateV4ToV5() error {
if err != nil {
return fmt.Errorf("Unable to initialize the quick config. %v", err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf("Failed to migrate config from "+cv4.Version+" to "+srvConfig.Version+". %v", err)
@@ -326,11 +317,8 @@ func migrateV5ToV6() error {
if err != nil {
return fmt.Errorf("Unable to initialize the quick config. %v", err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf("Failed to migrate config from "+cv5.Version+" to "+srvConfig.Version+". %v", err)
@@ -390,11 +378,8 @@ func migrateV6ToV7() error {
if err != nil {
return fmt.Errorf("Unable to initialize the quick config. %v", err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf("Failed to migrate config from "+cv6.Version+" to "+srvConfig.Version+". %v", err)
@@ -461,11 +446,8 @@ func migrateV7ToV8() error {
if err != nil {
return fmt.Errorf("Unable to initialize the quick config. %v", err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf("Failed to migrate config from "+cv7.Version+" to "+srvConfig.Version+". %v", err)
@@ -540,11 +522,8 @@ func migrateV8ToV9() error {
return fmt.Errorf("Unable to initialize the quick config. %v",
err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf(
@@ -625,11 +604,8 @@ func migrateV9ToV10() error {
return fmt.Errorf("Unable to initialize the quick config. %v",
err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf(
@@ -713,11 +689,8 @@ func migrateV10ToV11() error {
return fmt.Errorf("Unable to initialize the quick config. %v",
err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf(
@@ -819,11 +792,8 @@ func migrateV11ToV12() error {
return fmt.Errorf("Unable to initialize the quick config. %v",
err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf(
@@ -916,11 +886,8 @@ func migrateV12ToV13() error {
return fmt.Errorf("Unable to initialize the quick config. %v",
err)
}
configFile, err := getConfigFile()
if err != nil {
return fmt.Errorf("Unable to get config file. %v", err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf(
@@ -937,3 +904,102 @@ func migrateV12ToV13() error {
)
return nil
}
// Version '13' to '14' migration. Adds the browser configuration parameter.
func migrateV13ToV14() error {
cv13, err := loadConfigV13()
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("Unable to load config version 13. %v", err)
}
if cv13.Version != "13" {
return nil
}
// Copy over fields from V13 into V14 config struct
srvConfig := &serverConfigV14{
Logger: &logger{},
Notify: &notifier{},
}
srvConfig.Version = "14"
srvConfig.Credential = cv13.Credential
srvConfig.Region = cv13.Region
if srvConfig.Region == "" {
// Region needs to be set for AWS Signature Version 4.
srvConfig.Region = globalMinioDefaultRegion
}
srvConfig.Logger.Console = cv13.Logger.Console
srvConfig.Logger.File = cv13.Logger.File
// check and set notifiers config
if len(cv13.Notify.AMQP) == 0 {
srvConfig.Notify.AMQP = make(map[string]amqpNotify)
srvConfig.Notify.AMQP["1"] = amqpNotify{}
} else {
srvConfig.Notify.AMQP = cv13.Notify.AMQP
}
if len(cv13.Notify.ElasticSearch) == 0 {
srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{}
} else {
srvConfig.Notify.ElasticSearch = cv13.Notify.ElasticSearch
}
if len(cv13.Notify.Redis) == 0 {
srvConfig.Notify.Redis = make(map[string]redisNotify)
srvConfig.Notify.Redis["1"] = redisNotify{}
} else {
srvConfig.Notify.Redis = cv13.Notify.Redis
}
if len(cv13.Notify.PostgreSQL) == 0 {
srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{}
} else {
srvConfig.Notify.PostgreSQL = cv13.Notify.PostgreSQL
}
if len(cv13.Notify.Kafka) == 0 {
srvConfig.Notify.Kafka = make(map[string]kafkaNotify)
srvConfig.Notify.Kafka["1"] = kafkaNotify{}
} else {
srvConfig.Notify.Kafka = cv13.Notify.Kafka
}
if len(cv13.Notify.NATS) == 0 {
srvConfig.Notify.NATS = make(map[string]natsNotify)
srvConfig.Notify.NATS["1"] = natsNotify{}
} else {
srvConfig.Notify.NATS = cv13.Notify.NATS
}
if len(cv13.Notify.Webhook) == 0 {
srvConfig.Notify.Webhook = make(map[string]webhookNotify)
srvConfig.Notify.Webhook["1"] = webhookNotify{}
} else {
srvConfig.Notify.Webhook = cv13.Notify.Webhook
}
// Set the new browser parameter to "on" by default
srvConfig.Browser = "on"
qc, err := quick.New(srvConfig)
if err != nil {
return fmt.Errorf("Unable to initialize the quick config. %v",
err)
}
configFile := getConfigFile()
err = qc.Save(configFile)
if err != nil {
return fmt.Errorf(
"Failed to migrate config from "+
cv13.Version+" to "+srvConfig.Version+
". %v", err,
)
}
console.Println(
"Migration from version " +
cv13.Version + " to " + srvConfig.Version +
" completed successfully.",
)
return nil
}
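Every notifier in the migration above follows the same keep-or-seed pattern: reuse the V13 map when it has entries, otherwise seed a map with an empty "1" entry so the saved JSON always carries a template section. A hypothetical helper (written with modern Go generics for brevity; the 2017-era code necessarily spells this out per notifier type) makes the pattern explicit:

```
// Hypothetical helper, not part of this PR: the keep-or-seed pattern
// that migrateV13ToV14 repeats for AMQP, Elasticsearch, Redis, etc.
func keepOrSeed[T any](old map[string]T) map[string]T {
	if len(old) == 0 {
		var zero T
		return map[string]T{"1": zero} // seed an empty template entry
	}
	return old // keep existing notifier targets
}
```

With it, each block above would collapse to, e.g., srvConfig.Notify.AMQP = keepOrSeed(cv13.Notify.AMQP).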

View File

@@ -31,7 +31,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
// remove the root directory after the test ends.
defer removeAll(rootPath)
setGlobalConfigPath(rootPath)
setConfigDir(rootPath)
// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
@@ -50,7 +50,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
}
// Initialize server config and check again if everything is fine
if err := loadConfig(credential{}); err != nil {
if err := loadConfig(envParams{}); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
@@ -65,8 +65,8 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
// remove the root directory after the test ends.
defer removeAll(rootPath)
setGlobalConfigPath(rootPath)
configPath := rootPath + "/" + globalMinioConfigFile
setConfigDir(rootPath)
configPath := rootPath + "/" + minioConfigFile
// Remove config file
if err := os.Remove(configPath); err != nil {
@@ -106,10 +106,13 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
if err := migrateV12ToV13(); err != nil {
t.Fatal("migrate v12 to v13 should succeed when no config file is found")
}
if err := migrateV13ToV14(); err != nil {
t.Fatal("migrate v13 to v14 should succeed when no config file is found")
}
}
// Test if a config migration from v2 to v12 is successfully done
func TestServerConfigMigrateV2toV12(t *testing.T) {
func TestServerConfigMigrateV2toV14(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
@@ -117,8 +120,8 @@ func TestServerConfigMigrateV2toV12(t *testing.T) {
// remove the root directory after the test ends.
defer removeAll(rootPath)
setGlobalConfigPath(rootPath)
configPath := rootPath + "/" + globalMinioConfigFile
setConfigDir(rootPath)
configPath := rootPath + "/" + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
@@ -143,12 +146,12 @@ func TestServerConfigMigrateV2toV12(t *testing.T) {
}
// Initialize server config and check again if everything is fine
if err := loadConfig(credential{}); err != nil {
if err := loadConfig(envParams{}); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
// Check the version number in the upgraded config file
expectedVersion := globalMinioConfigVersion
expectedVersion := v14
if serverConfig.Version != expectedVersion {
t.Fatalf("Expect version "+expectedVersion+", found: %v", serverConfig.Version)
}
@@ -171,8 +174,8 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
// remove the root directory after the test ends.
defer removeAll(rootPath)
setGlobalConfigPath(rootPath)
configPath := rootPath + "/" + globalMinioConfigFile
setConfigDir(rootPath)
configPath := rootPath + "/" + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\""), 0644); err != nil {
@@ -213,4 +216,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
if err := migrateV12ToV13(); err == nil {
t.Fatal("migrateConfigV12ToV13() should fail with a corrupted json")
}
if err := migrateV13ToV14(); err == nil {
t.Fatal("migrateConfigV13ToV14() should fail with a corrupted json")
}
}

View File

@@ -1,3 +1,19 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
@@ -8,6 +24,23 @@ import (
"github.com/minio/minio/pkg/quick"
)
func loadOldConfig(configFile string, config interface{}) (interface{}, error) {
if _, err := os.Stat(configFile); err != nil {
return nil, err
}
qc, err := quick.New(config)
if err != nil {
return nil, err
}
if err = qc.Load(configFile); err != nil {
return nil, err
}
return config, nil
}
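loadOldConfig centralizes the stat/quick.New/quick.Load boilerplate that every loadConfigVN used to repeat: the caller passes a pointer pre-seeded with the expected version string and type-asserts the result back. Illustrative call pattern, mirroring exactly what loadConfigV2 below does:

```
// Illustrative: seed the struct with its expected version, let
// loadOldConfig handle the stat/parse boilerplate, then assert
// back to the concrete type.
func loadConfigV2Sketch() (*configV2, error) {
	config, err := loadOldConfig(getConfigFile(), &configV2{Version: "2"})
	if config == nil {
		return nil, err
	}
	return config.(*configV2), err
}
```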
/////////////////// Config V1 ///////////////////
type configV1 struct {
Version string `json:"version"`
@@ -17,24 +50,12 @@ type configV1 struct {
// loadConfigV1 load config
func loadConfigV1() (*configV1, error) {
configPath, err := getConfigPath()
if err != nil {
configFile := filepath.Join(getConfigDir(), "fsUsers.json")
config, err := loadOldConfig(configFile, &configV1{Version: "1"})
if config == nil {
return nil, err
}
configFile := filepath.Join(configPath, "fsUsers.json")
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV1{}
c.Version = "1"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*configV1), err
}
/////////////////// Config V2 ///////////////////
@@ -61,23 +82,12 @@ type configV2 struct {
// loadConfigV2 load config version '2'.
func loadConfigV2() (*configV2, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV2{Version: "2"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV2{}
c.Version = "2"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*configV2), err
}
/////////////////// Config V3 ///////////////////
@@ -135,23 +145,12 @@ type configV3 struct {
// loadConfigV3 load config version '3'.
func loadConfigV3() (*configV3, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV3{Version: "3"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV3{}
c.Version = "3"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*configV3), err
}
// logger type representing version '4' logger config.
@@ -186,23 +185,12 @@ type configV4 struct {
// loadConfigV4 load config version '4'.
func loadConfigV4() (*configV4, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV4{Version: "4"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV4{}
c.Version = "4"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*configV4), err
}
// logger type representing version '5' logger config.
@@ -264,23 +252,12 @@ type configV5 struct {
// loadConfigV5 load config version '5'.
func loadConfigV5() (*configV5, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV5{Version: "5"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV5{}
c.Version = "5"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*configV5), err
}
type loggerV6 struct {
@@ -306,23 +283,12 @@ type configV6 struct {
// loadConfigV6 load config version '6'.
func loadConfigV6() (*configV6, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &configV6{Version: "6"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &configV6{}
c.Version = "6"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*configV6), err
}
// Notifier represents collection of supported notification queues in version
@@ -367,23 +333,12 @@ type serverConfigV7 struct {
// loadConfigV7 load config version '7'.
func loadConfigV7() (*serverConfigV7, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV7{Version: "7"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &serverConfigV7{}
c.Version = "7"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*serverConfigV7), err
}
// serverConfigV8 server configuration version '8'. Adds NATS notifier
@@ -407,23 +362,12 @@ type serverConfigV8 struct {
// loadConfigV8 load config version '8'.
func loadConfigV8() (*serverConfigV8, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV8{Version: "8"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
c := &serverConfigV8{}
c.Version = "8"
qc, err := quick.New(c)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return c, nil
return config.(*serverConfigV8), err
}
// serverConfigV9 server configuration version '9'. Adds PostgreSQL
@@ -446,24 +390,12 @@ type serverConfigV9 struct {
}
func loadConfigV9() (*serverConfigV9, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV9{Version: "9"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
srvCfg := &serverConfigV9{}
srvCfg.Version = "9"
srvCfg.rwMutex = &sync.RWMutex{}
qc, err := quick.New(srvCfg)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return srvCfg, nil
return config.(*serverConfigV9), err
}
// serverConfigV10 server configuration version '10' which is like
@@ -484,23 +416,12 @@ type serverConfigV10 struct {
}
func loadConfigV10() (*serverConfigV10, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV10{Version: "10"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
srvCfg := &serverConfigV10{}
srvCfg.Version = "10"
qc, err := quick.New(srvCfg)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return srvCfg, nil
return config.(*serverConfigV10), err
}
// natsNotifyV1 - structure was valid until config V 11
@@ -532,23 +453,12 @@ type serverConfigV11 struct {
}
func loadConfigV11() (*serverConfigV11, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV11{Version: "11"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
srvCfg := &serverConfigV11{}
srvCfg.Version = "11"
qc, err := quick.New(srvCfg)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return srvCfg, nil
return config.(*serverConfigV11), err
}
// serverConfigV12 server configuration version '12' which is like
@@ -568,21 +478,35 @@ type serverConfigV12 struct {
}
func loadConfigV12() (*serverConfigV12, error) {
configFile, err := getConfigFile()
if err != nil {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV12{Version: "12"})
if config == nil {
return nil, err
}
if _, err = os.Stat(configFile); err != nil {
return nil, err
}
srvCfg := &serverConfigV12{}
srvCfg.Version = "12"
qc, err := quick.New(srvCfg)
if err != nil {
return nil, err
}
if err := qc.Load(configFile); err != nil {
return nil, err
}
return srvCfg, nil
return config.(*serverConfigV12), err
}
// serverConfigV13 server configuration version '13' which is like
// version '12' except it adds support for webhook notification.
type serverConfigV13 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger *logger `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
func loadConfigV13() (*serverConfigV13, error) {
configFile := getConfigFile()
config, err := loadOldConfig(configFile, &serverConfigV13{Version: "13"})
if config == nil {
return nil, err
}
return config.(*serverConfigV13), err
}

View File

@@ -1,206 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"sync"
"github.com/minio/minio/pkg/quick"
)
// Read Write mutex for safe access to ServerConfig.
var serverConfigMu sync.RWMutex
// serverConfigV13 server configuration version '13' which is like
// version '12' except it adds support for webhook notification.
type serverConfigV13 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger *logger `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
// newConfig - initialize a new server config, saves creds from env
// if globalIsEnvCreds is set otherwise generates a new set of keys
// and those are saved.
func newConfig(envCreds credential) error {
// Initialize server config.
srvCfg := &serverConfigV13{
Logger: &logger{},
Notify: &notifier{},
}
srvCfg.Version = globalMinioConfigVersion
srvCfg.Region = globalMinioDefaultRegion
// If env is set for a fresh start, save them to config file.
if globalIsEnvCreds {
srvCfg.SetCredential(envCreds)
} else {
srvCfg.SetCredential(newCredential())
}
// Enable console logger by default on a fresh run.
srvCfg.Logger.Console = consoleLogger{
Enable: true,
Level: "error",
}
// Make sure to initialize notification configs.
srvCfg.Notify.AMQP = make(map[string]amqpNotify)
srvCfg.Notify.AMQP["1"] = amqpNotify{}
srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{}
srvCfg.Notify.Redis = make(map[string]redisNotify)
srvCfg.Notify.Redis["1"] = redisNotify{}
srvCfg.Notify.NATS = make(map[string]natsNotify)
srvCfg.Notify.NATS["1"] = natsNotify{}
srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
srvCfg.Notify.Kafka = make(map[string]kafkaNotify)
srvCfg.Notify.Kafka["1"] = kafkaNotify{}
srvCfg.Notify.Webhook = make(map[string]webhookNotify)
srvCfg.Notify.Webhook["1"] = webhookNotify{}
// Create config path.
if err := createConfigPath(); err != nil {
return err
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
serverConfigMu.Lock()
serverConfig = srvCfg
serverConfigMu.Unlock()
// Save config into file.
return serverConfig.Save()
}
// loadConfig - loads a new config from disk, overrides creds from env
// if globalIsEnvCreds is set otherwise serves the creds from loaded
// from the disk.
func loadConfig(envCreds credential) error {
configFile, err := getConfigFile()
if err != nil {
return err
}
if _, err = os.Stat(configFile); err != nil {
return err
}
srvCfg := &serverConfigV13{}
srvCfg.Version = globalMinioConfigVersion
qc, err := quick.New(srvCfg)
if err != nil {
return err
}
if err = qc.Load(configFile); err != nil {
return err
}
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(envCreds)
} else {
srvCfg.SetCredential(srvCfg.Credential)
}
// hold the mutex lock before a new config is assigned.
serverConfigMu.Lock()
// Save the loaded config globally.
serverConfig = srvCfg
serverConfigMu.Unlock()
// Set the version properly after the unmarshalled json is loaded.
serverConfig.Version = globalMinioConfigVersion
return nil
}
// serverConfig server config.
var serverConfig *serverConfigV13
// GetVersion get current config version.
func (s serverConfigV13) GetVersion() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Version
}
// SetRegion set new region.
func (s *serverConfigV13) SetRegion(region string) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Region = region
}
// GetRegion get current region.
func (s serverConfigV13) GetRegion() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Region
}
// SetCredentials set new credentials.
func (s *serverConfigV13) SetCredential(creds credential) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Set updated credential.
s.Credential = newCredentialWithKeys(creds.AccessKey, creds.SecretKey)
}
// GetCredentials get current credentials.
func (s serverConfigV13) GetCredential() credential {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Credential
}
// Save config.
func (s serverConfigV13) Save() error {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
// get config file.
configFile, err := getConfigFile()
if err != nil {
return err
}
// initialize quick.
qc, err := quick.New(&s)
if err != nil {
return err
}
// Save config file.
return qc.Save(configFile)
}

View File

@@ -1,120 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"reflect"
"testing"
)
func TestServerConfig(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
if serverConfig.GetRegion() != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", serverConfig.GetRegion())
}
// Set new region and verify.
serverConfig.SetRegion("us-west-1")
if serverConfig.GetRegion() != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", serverConfig.GetRegion())
}
// Set new amqp notification id.
serverConfig.Notify.SetAMQPByID("2", amqpNotify{})
savedNotifyCfg1 := serverConfig.Notify.GetAMQPByID("2")
if !reflect.DeepEqual(savedNotifyCfg1, amqpNotify{}) {
t.Errorf("Expecting AMQP config %#v found %#v", amqpNotify{}, savedNotifyCfg1)
}
// Set new elastic search notification id.
serverConfig.Notify.SetElasticSearchByID("2", elasticSearchNotify{})
savedNotifyCfg2 := serverConfig.Notify.GetElasticSearchByID("2")
if !reflect.DeepEqual(savedNotifyCfg2, elasticSearchNotify{}) {
t.Errorf("Expecting Elasticsearch config %#v found %#v", elasticSearchNotify{}, savedNotifyCfg2)
}
// Set new redis notification id.
serverConfig.Notify.SetRedisByID("2", redisNotify{})
savedNotifyCfg3 := serverConfig.Notify.GetRedisByID("2")
if !reflect.DeepEqual(savedNotifyCfg3, redisNotify{}) {
t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3)
}
// Set new kafka notification id.
serverConfig.Notify.SetKafkaByID("2", kafkaNotify{})
savedNotifyCfg4 := serverConfig.Notify.GetKafkaByID("2")
if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) {
t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4)
}
// Set new Webhook notification id.
serverConfig.Notify.SetWebhookByID("2", webhookNotify{})
savedNotifyCfg5 := serverConfig.Notify.GetWebhookByID("2")
if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg3)
}
// Set new console logger.
serverConfig.Logger.SetConsole(consoleLogger{
Enable: true,
})
consoleCfg := serverConfig.Logger.GetConsole()
if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
}
// Set new console logger.
serverConfig.Logger.SetConsole(consoleLogger{
Enable: false,
})
// Set new file logger.
serverConfig.Logger.SetFile(fileLogger{
Enable: true,
})
fileCfg := serverConfig.Logger.GetFile()
if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, consoleCfg)
}
// Set new file logger.
serverConfig.Logger.SetFile(fileLogger{
Enable: false,
})
// Match version.
if serverConfig.GetVersion() != globalMinioConfigVersion {
t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), globalMinioConfigVersion)
}
// Attempt to save.
if err := serverConfig.Save(); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}
// Do this only once here.
setGlobalConfigPath(rootPath)
// Initialize server config.
if err := loadConfig(credential{}); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}

370
cmd/config-v14.go Normal file
View File

@@ -0,0 +1,370 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"github.com/minio/minio/pkg/quick"
"github.com/tidwall/gjson"
)
// Read Write mutex for safe access to ServerConfig.
var serverConfigMu sync.RWMutex
// Config version
var v14 = "14"
// serverConfigV14 server configuration version '14' which is like
// version '13' except it adds support for the browser param.
type serverConfigV14 struct {
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser string `json:"browser"`
// Additional error logging configuration.
Logger *logger `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
func newServerConfigV14() *serverConfigV14 {
srvCfg := &serverConfigV14{
Version: v14,
Region: globalMinioDefaultRegion,
Logger: &logger{},
Notify: &notifier{},
}
srvCfg.SetCredential(mustGetNewCredential())
srvCfg.SetBrowser("on")
// Enable console logger by default on a fresh run.
srvCfg.Logger.Console = consoleLogger{
Enable: true,
Level: "error",
}
// Make sure to initialize notification configs.
srvCfg.Notify.AMQP = make(map[string]amqpNotify)
srvCfg.Notify.AMQP["1"] = amqpNotify{}
srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{}
srvCfg.Notify.Redis = make(map[string]redisNotify)
srvCfg.Notify.Redis["1"] = redisNotify{}
srvCfg.Notify.NATS = make(map[string]natsNotify)
srvCfg.Notify.NATS["1"] = natsNotify{}
srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
srvCfg.Notify.Kafka = make(map[string]kafkaNotify)
srvCfg.Notify.Kafka["1"] = kafkaNotify{}
srvCfg.Notify.Webhook = make(map[string]webhookNotify)
srvCfg.Notify.Webhook["1"] = webhookNotify{}
return srvCfg
}
// newConfig - initializes a new server config, saving env parameters if
// found, otherwise using default parameters.
func newConfig(envParams envParams) error {
// Initialize server config.
srvCfg := newServerConfigV14()
// If env is set for a fresh start, save them to config file.
if globalIsEnvCreds {
srvCfg.SetCredential(envParams.creds)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(envParams.browser)
}
// Create config path.
if err := createConfigDir(); err != nil {
return err
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
serverConfigMu.Lock()
serverConfig = srvCfg
serverConfigMu.Unlock()
// Save config into file.
return serverConfig.Save()
}
// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig(envParams envParams) error {
configFile := getConfigFile()
if _, err := os.Stat(configFile); err != nil {
return err
}
srvCfg := &serverConfigV14{}
qc, err := quick.New(srvCfg)
if err != nil {
return err
}
if err = qc.Load(configFile); err != nil {
return err
}
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(envParams.creds)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(envParams.browser)
}
if strings.ToLower(srvCfg.GetBrowser()) == "off" {
globalIsBrowserEnabled = false
}
// hold the mutex lock before a new config is assigned.
serverConfigMu.Lock()
// Save the loaded config globally.
serverConfig = srvCfg
serverConfigMu.Unlock()
if serverConfig.Version != v14 {
return errors.New("Unsupported config version `" + serverConfig.Version + "`.")
}
return nil
}
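Note the asymmetry above: only an explicit "off" (case-insensitive) disables the browser, while an empty or missing value stays enabled. A minimal sketch of that rule (assuming strings is imported):

```
// Sketch of the browser toggle rule used in loadConfig above.
func browserEnabled(v string) bool {
	return strings.ToLower(v) != "off" // "", "on", "ON" all mean enabled
}
```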
// doCheckDupJSONKeys recursively detects duplicate json keys
func doCheckDupJSONKeys(key, value gjson.Result) error {
// Key occurrences map of the current scope to count
// if there is any duplicated json key.
keysOcc := make(map[string]int)
// Holds the found error
var checkErr error
// Iterate over keys in the current json scope
value.ForEach(func(k, v gjson.Result) bool {
// If current key is not null, check if its
// value contains some duplicated keys.
if k.Type != gjson.Null {
keysOcc[k.String()]++
checkErr = doCheckDupJSONKeys(k, v)
}
return checkErr == nil
})
// Check found err
if checkErr != nil {
return errors.New(key.String() + " => " + checkErr.Error())
}
// Check for duplicated keys
for k, v := range keysOcc {
if v > 1 {
return errors.New(key.String() + " => `" + k + "` entry is duplicated")
}
}
return nil
}
// Check recursively if a key is duplicated in the same json scope
// e.g.:
// `{ "key" : { "key" ..` is accepted
// `{ "key" : { "subkey" : "val1", "subkey": "val2" ..` throws subkey duplicated error
func checkDupJSONKeys(json string) error {
// Parse config with gjson library
config := gjson.Parse(json)
// Create a fake rootKey since root json doesn't seem to have representation
// in gjson library.
rootKey := gjson.Result{Type: gjson.String, Str: minioConfigFile}
// Check if loaded json contains any duplicated keys
return doCheckDupJSONKeys(rootKey, config)
}
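A quick way to exercise the checker (illustrative; the error text follows the key-path format built up in doCheckDupJSONKeys, and the fake rootKey is the config file name):

```
// Illustrative: a root-level duplicate is reported against config.json.
if err := checkDupJSONKeys(`{"version":"14","version":"13"}`); err != nil {
	fmt.Println(err) // config.json => `version` entry is duplicated
}
```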
// validateConfig checks the config file for a supported version, duplicated JSON keys and valid field values.
func validateConfig() error {
// Get file config path
configFile := getConfigFile()
srvCfg := &serverConfigV14{}
// Load config file
qc, err := quick.New(srvCfg)
if err != nil {
return err
}
if err = qc.Load(configFile); err != nil {
return err
}
// Check if config version is valid
if srvCfg.GetVersion() != v14 {
return errors.New("bad config version, expected: " + v14)
}
// Load config file json and check for duplication json keys
jsonBytes, err := ioutil.ReadFile(configFile)
if err != nil {
return err
}
if err := checkDupJSONKeys(string(jsonBytes)); err != nil {
return err
}
// Validate region field
if srvCfg.GetRegion() == "" {
return errors.New("Region config value cannot be empty")
}
// Validate browser field
if b := strings.ToLower(srvCfg.GetBrowser()); b != "on" && b != "off" {
return fmt.Errorf("Browser config value %s is invalid", b)
}
// Validate credential field
if !srvCfg.Credential.IsValid() {
return errors.New("invalid credential")
}
// Validate logger field
if err := srvCfg.Logger.Validate(); err != nil {
return err
}
// Validate notify field
if err := srvCfg.Notify.Validate(); err != nil {
return err
}
return nil
}
// serverConfig server config.
var serverConfig *serverConfigV14
// GetVersion get current config version.
func (s serverConfigV14) GetVersion() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Version
}
// SetRegion set new region.
func (s *serverConfigV14) SetRegion(region string) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Empty region means "us-east-1" by default from S3 spec.
if region == "" {
region = "us-east-1"
}
s.Region = region
}
// GetRegion get current region.
func (s serverConfigV14) GetRegion() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
if s.Region != "" {
return s.Region
} // region empty
// Empty region means "us-east-1" by default from S3 spec.
return "us-east-1"
}
// SetCredential set new credentials.
func (s *serverConfigV14) SetCredential(creds credential) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Set updated credential.
s.Credential = creds
}
// GetCredential get current credentials.
func (s serverConfigV14) GetCredential() credential {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Credential
}
// SetBrowser set if browser is enabled.
func (s *serverConfigV14) SetBrowser(v string) {
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
// Set browser param
if v == "" {
v = "on" // Browser is on by default.
}
// Set the new value.
s.Browser = v
}
// GetBrowser get current browser setting.
func (s serverConfigV14) GetBrowser() string {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
if s.Browser != "" {
return s.Browser
} // empty browser.
// Empty browser means "on" by default.
return "on"
}
// Save config.
func (s serverConfigV14) Save() error {
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
// get config file.
configFile := getConfigFile()
// initialize quick.
qc, err := quick.New(&s)
if err != nil {
return err
}
// Save config file.
return qc.Save(configFile)
}

292
cmd/config-v14_test.go Normal file
View File

@@ -0,0 +1,292 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/tidwall/gjson"
)
func TestServerConfig(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
if serverConfig.GetRegion() != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", serverConfig.GetRegion())
}
// Set new region and verify.
serverConfig.SetRegion("us-west-1")
if serverConfig.GetRegion() != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", serverConfig.GetRegion())
}
// Set new amqp notification id.
serverConfig.Notify.SetAMQPByID("2", amqpNotify{})
savedNotifyCfg1 := serverConfig.Notify.GetAMQPByID("2")
if !reflect.DeepEqual(savedNotifyCfg1, amqpNotify{}) {
t.Errorf("Expecting AMQP config %#v found %#v", amqpNotify{}, savedNotifyCfg1)
}
// Set new elastic search notification id.
serverConfig.Notify.SetElasticSearchByID("2", elasticSearchNotify{})
savedNotifyCfg2 := serverConfig.Notify.GetElasticSearchByID("2")
if !reflect.DeepEqual(savedNotifyCfg2, elasticSearchNotify{}) {
t.Errorf("Expecting Elasticsearch config %#v found %#v", elasticSearchNotify{}, savedNotifyCfg2)
}
// Set new redis notification id.
serverConfig.Notify.SetRedisByID("2", redisNotify{})
savedNotifyCfg3 := serverConfig.Notify.GetRedisByID("2")
if !reflect.DeepEqual(savedNotifyCfg3, redisNotify{}) {
t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3)
}
// Set new kafka notification id.
serverConfig.Notify.SetKafkaByID("2", kafkaNotify{})
savedNotifyCfg4 := serverConfig.Notify.GetKafkaByID("2")
if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) {
t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4)
}
// Set new Webhook notification id.
serverConfig.Notify.SetWebhookByID("2", webhookNotify{})
savedNotifyCfg5 := serverConfig.Notify.GetWebhookByID("2")
if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg5)
}
// Set new console logger.
serverConfig.Logger.SetConsole(consoleLogger{
Enable: true,
})
consoleCfg := serverConfig.Logger.GetConsole()
if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
}
// Set new console logger.
serverConfig.Logger.SetConsole(consoleLogger{
Enable: false,
})
// Set new file logger.
serverConfig.Logger.SetFile(fileLogger{
Enable: true,
})
fileCfg := serverConfig.Logger.GetFile()
if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, fileCfg)
}
// Set new file logger.
serverConfig.Logger.SetFile(fileLogger{
Enable: false,
})
// Match version.
if serverConfig.GetVersion() != v14 {
t.Errorf("Expecting version %s found %s", v14, serverConfig.GetVersion())
}
// Attempt to save.
if err := serverConfig.Save(); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}
// Do this only once here.
setConfigDir(rootPath)
// Initialize server config.
if err := loadConfig(envParams{}); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
func TestServerConfigWithEnvs(t *testing.T) {
os.Setenv("MINIO_BROWSER", "off")
defer os.Unsetenv("MINIO_BROWSER")
os.Setenv("MINIO_ACCESS_KEY", "minio")
defer os.Unsetenv("MINIO_ACCESS_KEY")
os.Setenv("MINIO_SECRET_KEY", "minio123")
defer os.Unsetenv("MINIO_SECRET_KEY")
defer func() {
globalIsEnvBrowser = false
globalIsEnvCreds = false
}()
// Get test root.
rootPath, err := getTestRoot()
if err != nil {
t.Error(err)
}
// Do this only once here.
setConfigDir(rootPath)
// Init config
initConfig()
// remove the root directory after the test ends.
defer removeAll(rootPath)
// Check if serverConfig picked up the browser setting from env.
if serverConfig.GetBrowser() != "off" {
t.Errorf("Expecting browser `off` found %s", serverConfig.GetBrowser())
}
// Check if serverConfig picked up the credentials from env.
cred := serverConfig.GetCredential()
if cred.AccessKey != "minio" {
t.Errorf("Expecting access key to be `minio` found %s", cred.AccessKey)
}
if cred.SecretKey != "minio123" {
t.Errorf("Expecting secret key to be `minio123` found %s", cred.SecretKey)
}
}
func TestCheckDupJSONKeys(t *testing.T) {
testCases := []struct {
json string
shouldPass bool
}{
{`{}`, true},
{`{"version" : "13"}`, true},
{`{"version" : "13", "version": "14"}`, false},
{`{"version" : "13", "credential": {"accessKey": "12345"}}`, true},
{`{"version" : "13", "credential": {"accessKey": "12345", "accessKey":"12345"}}`, false},
{`{"version" : "13", "notify": {"amqp": {"1"}, "webhook":{"3"}}}`, true},
{`{"version" : "13", "notify": {"amqp": {"1"}, "amqp":{"3"}}}`, false},
{`{"version" : "13", "notify": {"amqp": {"1":{}, "2":{}}}}`, true},
{`{"version" : "13", "notify": {"amqp": {"1":{}, "1":{}}}}`, false},
}
for i, testCase := range testCases {
err := doCheckDupJSONKeys(gjson.Result{}, gjson.Parse(testCase.json))
if testCase.shouldPass && err != nil {
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, err)
}
if !testCase.shouldPass && err == nil {
t.Errorf("Test %d, should fail but it succeeded.", i+1)
}
}
}
// Tests the config validator.
func TestValidateConfig(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
configPath := filepath.Join(rootPath, minioConfigFile)
v := v14
testCases := []struct {
configData string
shouldPass bool
}{
// Test 1 - wrong json
{`{`, false},
// Test 2 - empty json
{`{}`, false},
// Test 3 - wrong config version
{`{"version": "10"}`, false},
// Test 4 - wrong browser parameter
{`{"version": "` + v + `", "browser": "foo"}`, false},
// Test 5 - missing credential
{`{"version": "` + v + `", "browser": "on"}`, false},
// Test 6 - missing secret key
{`{"version": "` + v + `", "browser": "on", "credential" : {"accessKey":"minio", "secretKey":""}}`, false},
// Test 7 - missing region should pass, defaults to 'us-east-1'.
{`{"version": "` + v + `", "browser": "on", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
// Test 8 - missing browser should pass, defaults to 'on'.
{`{"version": "` + v + `", "region": "us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
// Test 9 - success
{`{"version": "` + v + `", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
// Test 10 - duplicated json keys
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
// Test 11 - Wrong Console logger level
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "logger": { "console": { "enable": true, "level": "foo" } }}`, false},
// Test 12 - Wrong File logger level
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "logger": { "file": { "enable": true, "level": "foo" } }}`, false},
// Test 13 - Test AMQP
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
// Test 14 - Test NATS
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
// Test 15 - Test ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
// Test 16 - Test Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
// Test 17 - Test PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 18 - Test Kafka
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
// Test 19 - Test Webhook
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
}
for i, testCase := range testCases {
if werr := ioutil.WriteFile(configPath, []byte(testCase.configData), 0700); werr != nil {
t.Fatal(werr)
}
verr := validateConfig()
if testCase.shouldPass && verr != nil {
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, verr)
}
if !testCase.shouldPass && verr == nil {
t.Errorf("Test %d, should fail but it succeeded.", i+1)
}
}
}

View File

@@ -1,93 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"path/filepath"
"sync"
"github.com/minio/go-homedir"
)
// configPath for custom config path only for testing purposes
var customConfigPath string
var configMu sync.Mutex
// Sets a new config path.
func setGlobalConfigPath(configPath string) {
configMu.Lock()
defer configMu.Unlock()
customConfigPath = configPath
}
// getConfigPath get server config path
func getConfigPath() (string, error) {
configMu.Lock()
defer configMu.Unlock()
if customConfigPath != "" {
return customConfigPath, nil
}
homeDir, err := homedir.Dir()
if err != nil {
return "", err
}
configPath := filepath.Join(homeDir, globalMinioConfigDir)
return configPath, nil
}
// mustGetConfigPath must get server config path.
func mustGetConfigPath() string {
configPath, err := getConfigPath()
if err != nil {
return ""
}
return configPath
}
// createConfigPath create server config path.
func createConfigPath() error {
configPath, err := getConfigPath()
if err != nil {
return err
}
return os.MkdirAll(configPath, 0700)
}
// isConfigFileExists - returns true if config file exists.
func isConfigFileExists() bool {
path, err := getConfigFile()
if err != nil {
return false
}
st, err := os.Stat(path)
// If file exists and is regular return true.
if err == nil && st.Mode().IsRegular() {
return true
}
return false
}
// getConfigFile get server config file.
func getConfigFile() (string, error) {
configPath, err := getConfigPath()
if err != nil {
return "", err
}
return filepath.Join(configPath, globalMinioConfigFile), nil
}

106
cmd/copy-part-range.go Normal file
View File

@@ -0,0 +1,106 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
// Writes S3 compatible copy part range error.
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
switch err {
case errInvalidRange:
writeErrorResponse(w, ErrInvalidCopyPartRange, url)
return
case errInvalidRangeSource:
writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
return
default:
writeErrorResponse(w, ErrInternalError, url)
return
}
}
// Parses x-amz-copy-source-range for CopyObjectPart API. Specifically written to
// differentiate the behavior between the regular httpRange header and x-amz-copy-source-range.
// The range of bytes to copy from the source object. The range value must use the form
// bytes=first-last, where the first and last are the zero-based byte offsets to copy.
// For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func parseCopyPartRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) {
// Return error if given range string doesn't start with byte range prefix.
if !strings.HasPrefix(rangeString, byteRangePrefix) {
return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix)
}
// Trim byte range prefix.
byteRangeString := strings.TrimPrefix(rangeString, byteRangePrefix)
// Check if range string contains delimiter '-', else return error. eg. "bytes=8"
sepIndex := strings.Index(byteRangeString, "-")
if sepIndex == -1 {
return nil, errInvalidRange
}
offsetBeginString := byteRangeString[:sepIndex]
offsetBegin := int64(-1)
// Convert offsetBeginString only if it's not empty.
if len(offsetBeginString) > 0 {
if !validBytePos.MatchString(offsetBeginString) {
return nil, errInvalidRange
}
if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
return nil, errInvalidRange
}
}
offsetEndString := byteRangeString[sepIndex+1:]
offsetEnd := int64(-1)
// Convert offsetEndString only if it's not empty.
if len(offsetEndString) > 0 {
if !validBytePos.MatchString(offsetEndString) {
return nil, errInvalidRange
}
if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
return nil, errInvalidRange
}
}
// rangeString contained only a first byte position, e.g. "bytes=2-", or
// only a last byte position, e.g. "bytes=-2"; both are invalid here.
if offsetBegin == -1 || offsetEnd == -1 {
return nil, errInvalidRange
}
// First byte position should not be greater than last byte
// position. e.g. "bytes=5-2"
if offsetBegin > offsetEnd {
return nil, errInvalidRange
}
// First and last byte positions should not be >= resourceSize.
if offsetBegin >= resourceSize || offsetEnd >= resourceSize {
return nil, errInvalidRangeSource
}
// Success..
return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
}
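Unlike a regular Range header, both offsets are mandatory here, so suffix ("bytes=-2") and open-ended ("bytes=2-") forms are rejected. Illustrative calls with a 10-byte source, consistent with the tests below (assuming fmt is imported):

```
// Illustrative calls into parseCopyPartRange.
r, _ := parseCopyPartRange("bytes=0000-0006", 10) // ok: offsets 0..6
fmt.Println(r.getLength())                        // 7
_, err := parseCopyPartRange("bytes=-2", 10)      // errInvalidRange: no first offset
_, err2 := parseCopyPartRange("bytes=20-30", 10)  // errInvalidRangeSource: beyond EOF
_, _ = err, err2
```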

View File

@@ -0,0 +1,85 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "testing"
// Test parseCopyPartRange()
func TestParseCopyPartRange(t *testing.T) {
// Test success cases.
successCases := []struct {
rangeString string
offsetBegin int64
offsetEnd int64
length int64
}{
{"bytes=2-5", 2, 5, 4},
{"bytes=2-9", 2, 9, 8},
{"bytes=2-2", 2, 2, 1},
{"bytes=0000-0006", 0, 6, 7},
}
for _, successCase := range successCases {
hrange, err := parseCopyPartRange(successCase.rangeString, 10)
if err != nil {
t.Fatalf("expected: <nil>, got: %s", err)
}
if hrange.offsetBegin != successCase.offsetBegin {
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin)
}
if hrange.offsetEnd != successCase.offsetEnd {
t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd)
}
if hrange.getLength() != successCase.length {
t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())
}
}
// Test invalid range strings.
invalidRangeStrings := []string{
"bytes=8",
"bytes=5-2",
"bytes=+2-5",
"bytes=2-+5",
"bytes=2--5",
"bytes=-",
"",
"2-5",
"bytes = 2-5",
"bytes=2 - 5",
"bytes=0-0,-1",
"bytes=2-5 ",
}
for _, rangeString := range invalidRangeStrings {
if _, err := parseCopyPartRange(rangeString, 10); err == nil {
t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
}
}
// Test error range strings.
errorRangeString := []string{
"bytes=10-10",
"bytes=20-30",
}
for _, rangeString := range errorRangeString {
if _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
}
}
}


@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ package cmd
import (
"crypto/rand"
"encoding/base64"
"os"
"errors"
"github.com/minio/mc/pkg/console"
@@ -27,15 +27,20 @@ import (
)
const (
accessKeyMinLen = 5
accessKeyMaxLen = 20
secretKeyMinLen = 8
secretKeyMaxLen = 40
alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphaNumericTableLen = byte(len(alphaNumericTable))
accessKeyMinLen = 5
accessKeyMaxLen = 20
secretKeyMinLen = 8
secretKeyMaxLenAmazon = 40
alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphaNumericTableLen = byte(len(alphaNumericTable))
)
var (
errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length")
errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length")
)
var secretKeyMaxLen = secretKeyMaxLenAmazon
func mustGetAccessKey() string {
keyBytes := make([]byte, accessKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
@@ -75,65 +80,72 @@ type credential struct {
secretKeyHash []byte
}
// Generate a bcrypt hashed key for input secret key.
func mustGetHashedSecretKey(secretKey string) []byte {
hashedSecretKey, err := bcrypt.GenerateFromPassword([]byte(secretKey), bcrypt.DefaultCost)
if err != nil {
console.Fatalf("Unable to generate secret hash for secret key. Err: %s.\n", err)
// IsValid - returns whether credential is valid or not.
func (cred credential) IsValid() bool {
return isAccessKeyValid(cred.AccessKey) && isSecretKeyValid(cred.SecretKey)
}
// Equal - returns whether two credentials are equal or not.
func (cred credential) Equal(ccred credential) bool {
if !ccred.IsValid() {
return false
}
return hashedSecretKey
if cred.secretKeyHash == nil {
secretKeyHash, err := bcrypt.GenerateFromPassword([]byte(cred.SecretKey), bcrypt.DefaultCost)
if err != nil {
errorIf(err, "Unable to generate hash of given password")
return false
}
cred.secretKeyHash = secretKeyHash
}
return (cred.AccessKey == ccred.AccessKey &&
bcrypt.CompareHashAndPassword(cred.secretKeyHash, []byte(ccred.SecretKey)) == nil)
}
func createCredential(accessKey, secretKey string) (cred credential, err error) {
if !isAccessKeyValid(accessKey) {
err = errInvalidAccessKeyLength
} else if !isSecretKeyValid(secretKey) {
err = errInvalidSecretKeyLength
} else {
var secretKeyHash []byte
secretKeyHash, err = bcrypt.GenerateFromPassword([]byte(secretKey), bcrypt.DefaultCost)
if err == nil {
cred.AccessKey = accessKey
cred.SecretKey = secretKey
cred.secretKeyHash = secretKeyHash
}
}
return cred, err
}
// Initialize a new credential object
func newCredential() credential {
return newCredentialWithKeys(mustGetAccessKey(), mustGetSecretKey())
}
func newCredentialWithKeys(accessKey, secretKey string) credential {
secretHash := mustGetHashedSecretKey(secretKey)
return credential{accessKey, secretKey, secretHash}
}
// Validate incoming auth keys.
func validateAuthKeys(accessKey, secretKey string) error {
// Validate the env values before proceeding.
if !isAccessKeyValid(accessKey) {
return errInvalidAccessKeyLength
func mustGetNewCredential() credential {
// Generate access key.
keyBytes := make([]byte, accessKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
console.Fatalln("Unable to generate access key.", err)
}
if !isSecretKeyValid(secretKey) {
return errInvalidSecretKeyLength
for i := 0; i < accessKeyMaxLen; i++ {
keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
}
return nil
}
accessKey := string(keyBytes)
// Variant of getCredentialFromEnv but upon error fails right here.
func mustGetCredentialFromEnv() credential {
creds, err := getCredentialFromEnv()
// Generate secret key.
keyBytes = make([]byte, secretKeyMaxLen)
if _, err := rand.Read(keyBytes); err != nil {
console.Fatalln("Unable to generate secret key.", err)
}
secretKey := string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])
cred, err := createCredential(accessKey, secretKey)
if err != nil {
console.Fatalf("Unable to load credentials from environment. Err: %s.\n", err)
}
return creds
}
// Converts accessKey and secretKeys into credential object which
// contains bcrypt secret key hash for future validation.
func getCredentialFromEnv() (credential, error) {
// Fetch access keys from environment variables and update the config.
accessKey := os.Getenv("MINIO_ACCESS_KEY")
secretKey := os.Getenv("MINIO_SECRET_KEY")
// Envs are set globally.
globalIsEnvCreds = accessKey != "" && secretKey != ""
if globalIsEnvCreds {
// Validate the env values before proceeding.
if err := validateAuthKeys(accessKey, secretKey); err != nil {
return credential{}, err
}
// Return credential object.
return newCredentialWithKeys(accessKey, secretKey), nil
console.Fatalln("Unable to generate new credential.", err)
}
return credential{}, nil
return cred
}
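A brief sketch of how the new credential API fits together, using only the functions shown above (createCredential, IsValid, Equal); illustrative, not part of the change.

```go
// Illustrative only: validating incoming keys against a stored credential.
func exampleCredentialCheck() bool {
	stored, err := createCredential("myuser", "mypassword")
	if err != nil {
		// errInvalidAccessKeyLength or errInvalidSecretKeyLength.
		return false
	}
	incoming := credential{AccessKey: "myuser", SecretKey: "mypassword"}
	// Equal lazily computes the bcrypt hash when it is missing, then
	// compares the incoming secret key against it.
	return stored.Equal(incoming)
}
```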

cmd/credential_test.go Normal file

@@ -0,0 +1,101 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "testing"
func TestMustGetNewCredential(t *testing.T) {
cred := mustGetNewCredential()
if !cred.IsValid() {
t.Fatalf("Failed to get new valid credential")
}
}
func TestCreateCredential(t *testing.T) {
cred := mustGetNewCredential()
testCases := []struct {
accessKey string
secretKey string
expectedResult bool
expectedErr error
}{
// Access key too small.
{"user", "pass", false, errInvalidAccessKeyLength},
// Access key too long.
{"user12345678901234567", "pass", false, errInvalidAccessKeyLength},
// Access key contains unsupported characters.
{"!@#$", "pass", false, errInvalidAccessKeyLength},
// Secret key too small.
{"myuser", "pass", false, errInvalidSecretKeyLength},
// Secret key too long.
{"myuser", "pass1234567890123456789012345678901234567", false, errInvalidSecretKeyLength},
// Success when access key contains leading/trailing spaces.
{" user ", cred.SecretKey, true, nil},
{"myuser", "mypassword", true, nil},
{cred.AccessKey, cred.SecretKey, true, nil},
}
for _, testCase := range testCases {
cred, err := createCredential(testCase.accessKey, testCase.secretKey)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
if testCase.expectedResult != cred.IsValid() {
t.Fatalf("cred: expected: %v, got: %v", testCase.expectedResult, cred.IsValid())
}
}
}
func TestCredentialEqual(t *testing.T) {
cred := mustGetNewCredential()
testCases := []struct {
cred credential
ccred credential
expectedResult bool
}{
// Empty compare credential
{cred, credential{}, false},
// Empty credential
{credential{}, cred, false},
// Two different credentials
{cred, mustGetNewCredential(), false},
// Access key is different in compare credential.
{cred, credential{AccessKey: "myuser", SecretKey: cred.SecretKey}, false},
// Secret key is different in compare credential.
{cred, credential{AccessKey: cred.AccessKey, SecretKey: "mypassword"}, false},
// secretKeyHash is missing in compare credential.
{cred, credential{AccessKey: cred.AccessKey, SecretKey: cred.SecretKey}, true},
// secretKeyHash is missing in credential.
{credential{AccessKey: cred.AccessKey, SecretKey: cred.SecretKey}, cred, true},
// Same credentials.
{cred, cred, true},
}
for _, testCase := range testCases {
result := testCase.cred.Equal(testCase.ccred)
if result != testCase.expectedResult {
t.Fatalf("cred: expected: %v, got: %v", testCase.expectedResult, result)
}
}
}


@@ -114,6 +114,7 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
// Write encoded data to quorum disks in parallel.
for index, disk := range disks {
if disk == nil {
wErrs[index] = traceError(errDiskNotFound)
continue
}
wg.Add(1)
@@ -123,6 +124,8 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
wErr := disk.AppendFile(volume, path, enBlocks[index])
if wErr != nil {
wErrs[index] = traceError(wErr)
// Ignore disk which returned an error.
disks[index] = nil
return
}


@@ -192,7 +192,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
}()
// Total bytes written to writer
bytesWritten := int64(0)
var bytesWritten int64
startBlock := offset / blockSize
endBlock := (offset + length) / blockSize
@@ -263,7 +263,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
}
// Offset in enBlocks from where data should be read from.
enBlocksOffset := int64(0)
var enBlocksOffset int64
// Total data to be read from enBlocks.
enBlocksLength := curBlockSize


@@ -121,35 +121,6 @@ func testGetReadDisks(t *testing.T, xl *xlObjects) {
}
}
// Test getOrderedDisks which returns ordered slice of disks from their
// actual distribution.
func testGetOrderedDisks(t *testing.T, xl *xlObjects) {
disks := xl.storageDisks
distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
orderedDisks := getOrderedDisks(distribution, disks)
// From the "distribution" above you can notice that:
// 1st data block is in the 9th disk (i.e distribution index 8)
// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
if orderedDisks[0] != disks[8] ||
orderedDisks[1] != disks[7] ||
orderedDisks[2] != disks[9] ||
orderedDisks[3] != disks[6] ||
orderedDisks[4] != disks[10] ||
orderedDisks[5] != disks[5] ||
orderedDisks[6] != disks[11] ||
orderedDisks[7] != disks[4] ||
orderedDisks[8] != disks[12] ||
orderedDisks[9] != disks[3] ||
orderedDisks[10] != disks[13] ||
orderedDisks[11] != disks[2] ||
orderedDisks[12] != disks[14] ||
orderedDisks[13] != disks[1] ||
orderedDisks[14] != disks[15] ||
orderedDisks[15] != disks[0] {
t.Errorf("getOrderedDisks returned incorrect order.")
}
}
// Test for isSuccessDataBlocks and isSuccessDecodeBlocks.
func TestIsSuccessBlocks(t *testing.T) {
dataBlocks := 8
@@ -217,7 +188,7 @@ func TestIsSuccessBlocks(t *testing.T) {
}
}
// Wrapper function for testGetReadDisks, testGetOrderedDisks.
// Wrapper function for testGetReadDisks, testShuffleDisks.
func TestErasureReadUtils(t *testing.T) {
nDisks := 16
disks, err := getRandomDisks(nDisks)
@@ -236,7 +207,6 @@ func TestErasureReadUtils(t *testing.T) {
defer removeRoots(disks)
xl := objLayer.(*xlObjects)
testGetReadDisks(t, xl)
testGetOrderedDisks(t, xl)
}
// Simulates a faulty disk for ReadFile()


@@ -116,7 +116,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
write := length
// Counter to increment total written.
totalWritten := int64(0)
var totalWritten int64
// Write all data blocks to dst.
for _, block := range enBlocks[:dataBlocks] {
@@ -180,7 +180,7 @@ func copyBuffer(writer io.Writer, disk StorageAPI, volume string, path string, b
}
// Starting offset for Reading the file.
startOffset := int64(0)
var startOffset int64
// Read until io.EOF.
for {


@@ -86,10 +86,10 @@ func traceError(e error, errs ...error) error {
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
name := fn.Name()
if strings.HasSuffix(name, "ServeHTTP") {
if hasSuffix(name, "ServeHTTP") {
break
}
if strings.HasSuffix(name, "runtime.") {
if hasSuffix(name, "runtime.") {
break
}


@@ -313,6 +313,9 @@ func eventNotifyForBucketListeners(eventType, objectName, bucketName string,
// eventNotify notifies an event to relevant targets based on their
// bucket configuration (notifications and listeners).
func eventNotify(event eventData) {
if globalEventNotifier == nil {
return
}
// Notifies a new event.
// List of events reported through this function are
// - s3:ObjectCreated:Put
@@ -537,6 +540,28 @@ func loadAllBucketNotifications(objAPI ObjectLayer) (map[string]*notificationCon
return nConfigs, lConfigs, nil
}
// addQueueTarget - calls newTargetFunc function and adds its returned value to queueTargets
func addQueueTarget(queueTargets map[string]*logrus.Logger,
accountID, queueType string,
newTargetFunc func(string) (*logrus.Logger, error)) (string, error) {
// Construct the queue ARN for AMQP.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueType
// If the queue target is already initialized, reuse it and return its ARN.
if _, ok := queueTargets[queueARN]; ok {
return queueARN, nil
}
// Using accountID we can now initialize a new target logrus instance.
logger, err := newTargetFunc(accountID)
if err == nil {
queueTargets[queueARN] = logger
}
return queueARN, err
}
// Loads all queue targets and initializes each queueARN depending on its config.
// Each instance of queueARN registers its own logrus to communicate with the
// queue service. A queueARN, once initialized, is not initialized again for the
@@ -548,54 +573,37 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
if !amqpN.Enable {
continue
}
// Construct the queue ARN for AMQP.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeAMQP
// Queue target if already initialized we move to the next ARN.
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID we can now initialize a new AMQP logrus instance.
amqpLog, err := newAMQPNotify(accountID)
if err != nil {
// Encapsulate network error to be more informative.
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeAMQP, newAMQPNotify); err != nil {
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
queueTargets[queueARN] = amqpLog
}
// Load all nats targets, initialize their respective loggers.
for accountID, natsN := range serverConfig.Notify.GetNATS() {
if !natsN.Enable {
continue
}
// Construct the queue ARN for NATS.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeNATS
// Queue target if already initialized we move to the next ARN.
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID we can now initialize a new NATS logrus instance.
natsLog, err := newNATSNotify(accountID)
if err != nil {
// Encapsulate network error to be more informative.
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeNATS, newNATSNotify); err != nil {
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
queueTargets[queueARN] = natsLog
}
// Load redis targets, initialize their respective loggers.
@@ -603,27 +611,18 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
if !redisN.Enable {
continue
}
// Construct the queue ARN for Redis.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeRedis
// Queue target if already initialized we move to the next ARN.
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID we can now initialize a new Redis logrus instance.
redisLog, err := newRedisNotify(accountID)
if err != nil {
// Encapsulate network error to be more informative.
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeRedis, newRedisNotify); err != nil {
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
queueTargets[queueARN] = redisLog
}
// Load Webhook targets, initialize their respective loggers.
@@ -631,20 +630,10 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
if !webhookN.Enable {
continue
}
// Construct the queue ARN for Webhook.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeWebhook
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID we can now initialize a new Webhook logrus instance.
webhookLog, err := newWebhookNotify(accountID)
if err != nil {
if _, err := addQueueTarget(queueTargets, accountID, queueTypeWebhook, newWebhookNotify); err != nil {
return nil, err
}
queueTargets[queueARN] = webhookLog
}
// Load elastic targets, initialize their respective loggers.
@@ -652,25 +641,18 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
if !elasticN.Enable {
continue
}
// Construct the queue ARN for Elastic.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeElastic
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID we can now initialize a new ElasticSearch logrus instance.
elasticLog, err := newElasticNotify(accountID)
if err != nil {
// Encapsulate network error to be more informative.
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeElastic, newElasticNotify); err != nil {
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
Op: "Connecting to " + queueARN, Net: "tcp",
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
queueTargets[queueARN] = elasticLog
}
// Load PostgreSQL targets, initialize their respective loggers.
@@ -678,50 +660,37 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
if !pgN.Enable {
continue
}
// Construct the queue ARN for Postgres.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypePostgreSQL
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID initialize a new Postgresql logrus instance.
pgLog, err := newPostgreSQLNotify(accountID)
if err != nil {
// Encapsulate network error to be more informative.
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypePostgreSQL, newPostgreSQLNotify); err != nil {
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
Op: "Connecting to " + queueARN, Net: "tcp",
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
queueTargets[queueARN] = pgLog
}
// Load Kafka targets, initialize their respective loggers.
for accountID, kafkaN := range serverConfig.Notify.GetKafka() {
if !kafkaN.Enable {
continue
}
// Construct the queue ARN for Kafka.
queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeKafka
_, ok := queueTargets[queueARN]
if ok {
continue
}
// Using accountID initialize a new Kafka logrus instance.
kafkaLog, err := newKafkaNotify(accountID)
if err != nil {
// Encapsulate network error to be more informative.
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeKafka, newKafkaNotify); err != nil {
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
Op: "Connecting to " + queueARN, Net: "tcp",
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
queueTargets[queueARN] = kafkaLog
}
// Successfully initialized queue targets.
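With the per-target boilerplate folded into addQueueTarget, wiring up another queue type reduces to a few lines. The MQTT names below are hypothetical and only illustrate the shape of the call; any constructor matching func(string) (*logrus.Logger, error) fits.

```go
// Hypothetical sketch: wiring one more queue type through addQueueTarget.
// queueTypeMQTT and newMQTTNotify are invented names for illustration.
func loadMQTTTargets(queueTargets map[string]*logrus.Logger, enabled map[string]bool) error {
	for accountID, on := range enabled {
		if !on {
			continue
		}
		if _, err := addQueueTarget(queueTargets, accountID, queueTypeMQTT, newMQTTNotify); err != nil {
			return err
		}
	}
	return nil
}
```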


@@ -213,9 +213,7 @@ func genFormatXLInvalidDisksOrder() []*formatConfigV1 {
}
// Reorder jbod for failure case.
var jbod1 = make([]string, 8)
for i, j := range jbod {
jbod1[i] = j
}
copy(jbod1, jbod)
jbod1[1], jbod1[2] = jbod[2], jbod[1]
formatConfigs[2].XL.JBOD = jbod1
return formatConfigs
@@ -576,9 +574,7 @@ func TestSavedUUIDOrder(t *testing.T) {
}
// Reorder jbod for failure case.
var jbod1 = make([]string, 8)
for i, j := range jbod {
jbod1[i] = j
}
copy(jbod1, jbod)
jbod1[1], jbod1[2] = jbod[2], jbod[1]
formatConfigs[2].XL.JBOD = jbod1
uuidTestCases[1].shouldPass = false


@@ -210,7 +210,7 @@ func (fs fsObjects) appendParts(bucket, object, uploadID string, info bgAppendPa
func (fs fsObjects) appendPart(bucket, object, uploadID string, part objectPartInfo, buf []byte) error {
partPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadID, part.Name)
offset := int64(0)
var offset int64
// Read each file part to start writing to the temporary concatenated object.
file, size, err := fsOpenFile(partPath, offset)
if err != nil {


@@ -228,20 +228,24 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
}
// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
func fsCreateFile(tempObjPath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
if tempObjPath == "" || reader == nil || buf == nil {
func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
if filePath == "" || reader == nil || buf == nil {
return 0, traceError(errInvalidArgument)
}
if err := checkPathLength(tempObjPath); err != nil {
if err := checkPathLength(filePath); err != nil {
return 0, traceError(err)
}
if err := mkdirAll(pathutil.Dir(tempObjPath), 0777); err != nil {
if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
return 0, traceError(err)
}
writer, err := os.OpenFile(preparePath(tempObjPath), os.O_CREATE|os.O_WRONLY, 0666)
if err := checkDiskFree(pathutil.Dir(filePath), fallocSize); err != nil {
return 0, traceError(err)
}
writer, err := os.OpenFile(preparePath(filePath), os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
// File path cannot be verified since one of the parents is a file.
if isSysErrNotDir(err) {


@@ -238,12 +238,9 @@ func saveFormatFS(formatPath string, fsFormat *formatConfigV1) error {
}
defer lk.Close()
if _, err = lk.Write(metadataBytes); err != nil {
return err
}
_, err = lk.Write(metadataBytes)
// Success.
return nil
return err
}
// Return if the part info in uploadedParts and completeParts are same.


@@ -278,7 +278,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
}
entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
if strings.HasSuffix(walkResult.entry, slashSeparator) {
if hasSuffix(walkResult.entry, slashSeparator) {
uploads = append(uploads, uploadMetadata{
Object: entry,
})
@@ -314,7 +314,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
for _, upload := range uploads {
var objectName string
var uploadID string
if strings.HasSuffix(upload.Object, slashSeparator) {
if hasSuffix(upload.Object, slashSeparator) {
// All directory entries are common prefixes.
uploadID = "" // Upload ids are empty for CommonPrefixes.
objectName = upload.Object
@@ -463,7 +463,6 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
pipeReader, pipeWriter := io.Pipe()
go func() {
startOffset := int64(0) // Read the whole file.
if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
pipeWriter.CloseWithError(gerr)
@@ -864,7 +863,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
multipartPartFile := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)
var reader io.ReadCloser
offset := int64(0)
var offset int64
reader, _, err = fsOpenFile(multipartPartFile, offset)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)


@@ -50,7 +50,6 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
fsi.Lock()
rlkFile, ok := fsi.readersMap[path]
// File reference exists on the map, validate if it's
// really closed and we are safe to purge it.
if ok && rlkFile != nil {
@@ -76,8 +75,9 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
// read lock mode.
if !ok {
var err error
var newRlkFile *lock.RLockedFile
// Open file for reading.
rlkFile, err = lock.RLockedOpenFile(preparePath(path))
newRlkFile, err = lock.RLockedOpenFile(preparePath(path))
if err != nil {
if os.IsNotExist(err) {
return nil, errFileNotFound
@@ -95,6 +95,30 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
// Save new reader on the map.
fsi.Lock()
rlkFile, ok = fsi.readersMap[path]
if ok && rlkFile != nil {
// If the file is closed but not removed from the map, it is a bug.
if rlkFile.IsClosed() {
// Log this as an error.
errorIf(errUnexpected, "Unexpected entry found on the map %s", path)
// Purge the cached lock path from map.
delete(fsi.readersMap, path)
// Save the newly acquired read locked file.
rlkFile = newRlkFile
} else {
// Increment the lock ref, since the file is not closed yet
// and caller requested to read the file again.
rlkFile.IncLockRef()
newRlkFile.Close()
}
} else {
// Save the newly acquired read locked file.
rlkFile = newRlkFile
}
// Save the rlkFile back on the map.
fsi.readersMap[path] = rlkFile
fsi.Unlock()
}
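The re-check above is a double-checked, reference-counted cache: the pool lock is released while the file is opened, so another goroutine may have published an entry in the meantime. A self-contained sketch of the same pattern, with invented minimal types standing in for lock.RLockedFile:

```go
// Illustrative sketch of the pattern in fsIOPool.Open.
type entry struct {
	refs   int
	closed bool
}

func openCached(mu *sync.Mutex, m map[string]*entry, path string) *entry {
	mu.Lock()
	if e, ok := m[path]; ok && !e.closed {
		e.refs++ // reuse the live entry
		mu.Unlock()
		return e
	}
	mu.Unlock()

	fresh := &entry{refs: 1} // stands in for lock.RLockedOpenFile

	// Re-check under the lock: another goroutine may have raced us
	// while the slow open ran without the lock held.
	mu.Lock()
	defer mu.Unlock()
	if e, ok := m[path]; ok && !e.closed {
		e.refs++ // reuse theirs and discard our fresh handle
		return e
	}
	m[path] = fresh
	return fresh
}
```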


@@ -19,18 +19,14 @@ package cmd
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"syscall"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/lock"
"github.com/minio/sha256-simd"
)
@@ -44,9 +40,6 @@ type fsObjects struct {
// temporary transactions.
fsUUID string
minFreeSpace int64
minFreeInodes int64
// FS rw pool.
rwPool *fsIOPool
@@ -74,12 +67,7 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
}
metaMultipartPath := pathJoin(fsPath, minioMetaMultipartBucket)
if err := mkdirAll(metaMultipartPath, 0777); err != nil {
return err
}
// Return success here.
return nil
return mkdirAll(metaMultipartPath, 0777)
}
@@ -141,10 +129,8 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
// Initialize fs objects.
fs := &fsObjects{
fsPath: fsPath,
fsUUID: fsUUID,
minFreeSpace: fsMinFreeSpace,
minFreeInodes: fsMinFreeInodes,
fsPath: fsPath,
fsUUID: fsUUID,
rwPool: &fsIOPool{
readersMap: make(map[string]*lock.RLockedFile),
},
@@ -170,41 +156,6 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
return fs, nil
}
// checkDiskFree verifies if disk path has sufficient minimum free disk space and files.
func (fs fsObjects) checkDiskFree() (err error) {
// We don't validate disk space or inode utilization on windows.
// Each Windows call to 'GetVolumeInformationW' takes around 3-5 seconds.
if runtime.GOOS == globalWindowsOSName {
return nil
}
var di disk.Info
di, err = getDiskInfo(preparePath(fs.fsPath))
if err != nil {
return err
}
// Remove 5% from free space for cumulative disk space used for journalling, inodes etc.
availableDiskSpace := float64(di.Free) * 0.95
if int64(availableDiskSpace) <= fs.minFreeSpace {
return errDiskFull
}
// Some filesystems do not implement a way to provide total inodes available, instead inodes
// are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver.
// Allow the available disk space to be validated separately; we validate inodes only if
// total inodes are provided by the underlying filesystem.
if di.Files != 0 && di.FSType != "NFS" {
availableFiles := int64(di.Ffree)
if availableFiles <= fs.minFreeInodes {
return errDiskFull
}
}
// Success.
return nil
}
// Should be called when process shuts down.
func (fs fsObjects) Shutdown() error {
// Cleanup and delete tmp uuid.
@@ -291,12 +242,11 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
return nil, toObjectErr(traceError(errDiskNotFound))
}
var invalidBucketNames []string
for _, entry := range entries {
if entry == minioMetaBucket+"/" || !strings.HasSuffix(entry, slashSeparator) {
// Ignore all reserved bucket names and invalid bucket names.
if isReservedOrInvalidBucket(entry) {
continue
}
var fi os.FileInfo
fi, err = fsStatDir(pathJoin(fs.fsPath, entry))
if err != nil {
@@ -310,24 +260,13 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
return nil, err
}
if !IsValidBucketName(fi.Name()) {
invalidBucketNames = append(invalidBucketNames, fi.Name())
continue
}
bucketInfos = append(bucketInfos, BucketInfo{
Name: fi.Name(),
// As os.Stat() doesn't carry other than ModTime(), use ModTime() as CreatedTime.
// As os.Stat() doesn't carry CreatedTime, use ModTime() as CreatedTime.
Created: fi.ModTime(),
})
}
// Print a user friendly message if we indeed skipped certain directories which are
// incompatible with S3's bucket name restrictions.
if len(invalidBucketNames) > 0 {
errorIf(errors.New("One or more invalid bucket names found"), "Skipping %s", invalidBucketNames)
}
// Sort bucket infos by bucket name.
sort.Sort(byBucketName(bucketInfos))
@@ -380,7 +319,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
}
// Check if this request is only metadata update.
cpMetadataOnly := strings.EqualFold(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
cpMetadataOnly := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
if cpMetadataOnly {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fsMetaJSONFile)
var wlk *lock.LockedFile
@@ -409,7 +348,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
pipeReader, pipeWriter := io.Pipe()
go func() {
startOffset := int64(0) // Read the whole file.
var startOffset int64 // Read the whole file.
if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
pipeWriter.CloseWithError(gerr)
@@ -531,6 +470,12 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
// This is a special case with an object whose name ends with
// a slash separator, we always return object not found here.
if hasSuffix(object, slashSeparator) {
return ObjectInfo{}, toObjectErr(traceError(errFileNotFound), bucket, object)
}
if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
}
@@ -780,7 +725,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// Convert entry to ObjectInfo
entryToObjectInfo := func(entry string) (objInfo ObjectInfo, err error) {
if strings.HasSuffix(entry, slashSeparator) {
if hasSuffix(entry, slashSeparator) {
// Object name needs to be full path.
objInfo.Name = entry
objInfo.IsDir = true
@@ -804,7 +749,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// bucket argument is unused as we don't need to StatFile
// to figure if it's a file, just need to check that the
// object string does not end with "/".
return !strings.HasSuffix(object, slashSeparator)
return !hasSuffix(object, slashSeparator)
}
listDir := fs.listDirFactory(isLeaf)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
@@ -882,3 +827,8 @@ func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma
func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
return []BucketInfo{}, traceError(NotImplemented{})
}
func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
}

cmd/gateway-handlers.go Normal file

@@ -0,0 +1,703 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"encoding/json"
"encoding/xml"
router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
)
// GetObjectHandler - GET Object
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
var object, bucket string
vars := router.Vars(r)
bucket = vars["bucket"]
object = vars["object"]
// Fetch object stat info.
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}
getObjectInfo := objectAPI.GetObjectInfo
if reqAuthType == authTypeAnonymous {
getObjectInfo = objectAPI.AnonGetObjectInfo
}
objInfo, err := getObjectInfo(bucket, object)
if err != nil {
errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)
}
writeErrorResponse(w, apiErr, r.URL)
return
}
// Get request range.
var hrange *httpRange
rangeHeader := r.Header.Get("Range")
if rangeHeader != "" {
if hrange, err = parseRequestRange(rangeHeader, objInfo.Size); err != nil {
// Handle only errInvalidRange.
// Ignore other parse errors and treat the request as a regular GET, as Amazon S3 does.
if err == errInvalidRange {
writeErrorResponse(w, ErrInvalidRange, r.URL)
return
}
// log the error.
errorIf(err, "Invalid request range")
}
}
// Validate pre-conditions if any.
if checkPreconditions(w, r, objInfo) {
return
}
// Get the object.
var startOffset int64
length := objInfo.Size
if hrange != nil {
startOffset = hrange.offsetBegin
length = hrange.getLength()
}
// Indicates if any data was written to the http.ResponseWriter
dataWritten := false
// io.Writer type which keeps track if any data was written.
writer := funcToWriter(func(p []byte) (int, error) {
if !dataWritten {
// Set headers on the first write.
// Set standard object headers.
setObjectHeaders(w, objInfo, hrange)
// Set any additional requested response headers.
setGetRespHeaders(w, r.URL.Query())
dataWritten = true
}
return w.Write(p)
})
getObject := objectAPI.GetObject
if reqAuthType == authTypeAnonymous {
getObject = objectAPI.AnonGetObject
}
// Reads the object at startOffset and writes to writer.
if err := getObject(bucket, object, startOffset, length, writer); err != nil {
errorIf(err, "Unable to write to client.")
if !dataWritten {
// Send an error response only if no data has been written to the client yet,
// i.e. if partial data has already been written before an error occurred,
// there is no point in setting a status code and sending error XML.
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
}
return
}
if !dataWritten {
// If ObjectAPI.GetObject did not return error and no data has
// been written it would mean that it is a 0-byte object.
// call writer.Write(nil) to set appropriate headers.
writer.Write(nil)
}
}
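funcToWriter above adapts a closure into an io.Writer so response headers are set exactly once, on the first write. A minimal sketch of such an adapter (the real helper lives elsewhere in the cmd package; setHeadersOnce and underlying are invented names):

```go
// Minimal func-to-io.Writer adapter in the spirit of funcToWriter.
type writerFunc func(p []byte) (int, error)

func (f writerFunc) Write(p []byte) (int, error) { return f(p) }

// Usage sketch: headers go out lazily with the first byte written.
//	w := writerFunc(func(p []byte) (int, error) {
//		setHeadersOnce()
//		return underlying.Write(p)
//	})
```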
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api gatewayAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
var object, bucket string
vars := router.Vars(r)
bucket = vars["bucket"]
object = vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
return
}
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}
getObjectInfo := objectAPI.GetObjectInfo
if reqAuthType == authTypeAnonymous {
getObjectInfo = objectAPI.AnonGetObjectInfo
}
objInfo, err := getObjectInfo(bucket, object)
if err != nil {
errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)
}
writeErrorResponse(w, apiErr, r.URL)
return
}
// Validate pre-conditions if any.
if checkPreconditions(w, r, objInfo) {
return
}
// Set standard object headers.
setObjectHeaders(w, objInfo, nil)
// Successful response.
w.WriteHeader(http.StatusOK)
}
// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api gatewayAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
return
}
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(w, ErrMissingContentMD5, r.URL)
return
}
// Allocate incoming content length bytes.
deleteXMLBytes := make([]byte, r.ContentLength)
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
errorIf(err, "Unable to read HTTP body.")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
errorIf(err, "Unable to unmarshal delete objects request XML.")
writeErrorResponse(w, ErrMalformedXML, r.URL)
return
}
var dErrs = make([]error, len(deleteObjects.Objects))
// Delete all requested objects one by one.
for index, object := range deleteObjects.Objects {
dErr := objectAPI.DeleteObject(bucket, object.ObjectName)
if dErr != nil {
dErrs[index] = dErr
}
}
// Collect deleted objects and errors if any.
var deletedObjects []ObjectIdentifier
var deleteErrors []DeleteError
for index, err := range dErrs {
object := deleteObjects.Objects[index]
// Successfully deleted objects are collected separately.
if err == nil {
deletedObjects = append(deletedObjects, object)
continue
}
if _, ok := errorCause(err).(ObjectNotFound); ok {
// If the object is not found it should be
// accounted as deleted as per S3 spec.
deletedObjects = append(deletedObjects, object)
continue
}
errorIf(err, "Unable to delete object. %s", object.ObjectName)
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description,
Key: object.ObjectName,
})
}
// Generate response
response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// PutBucketPolicyHandler - PUT Bucket policy
// -----------------
// This implementation of the PUT operation uses the policy
// subresource to add to or replace a policy on a bucket
func (api gatewayAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
vars := router.Vars(r)
bucket := vars["bucket"]
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(bucket)
if err != nil {
errorIf(err, "Unable to find bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// If Content-Length is unknown or zero, deny the
// request. PutBucketPolicy always needs a Content-Length.
if r.ContentLength == -1 || r.ContentLength == 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
return
}
// If Content-Length is greater than maximum allowed policy size.
if r.ContentLength > maxAccessPolicySize {
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
return
}
// Read access policy up to maxAccessPolicySize.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
// bucket policies are limited to 20KB in size, using a limit reader.
policyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
if err != nil {
errorIf(err, "Unable to read from client.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
{
// FIXME: consolidate bucketPolicy and policy.BucketAccessPolicy so that
// the verification below is done on the same type.
// Parse bucket policy.
policyInfo := &bucketPolicy{}
err = parseBucketPolicy(bytes.NewReader(policyBytes), policyInfo)
if err != nil {
errorIf(err, "Unable to parse bucket policy.")
writeErrorResponse(w, ErrInvalidPolicyDocument, r.URL)
return
}
// Check bucket policy resources.
if s3Error := checkBucketPolicyResources(bucket, policyInfo); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
}
policyInfo := &policy.BucketAccessPolicy{}
if err = json.Unmarshal(policyBytes, policyInfo); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
var policies []BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})
}
if err = objAPI.SetBucketPolicies(bucket, policies); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Success.
writeSuccessNoContent(w)
}
// DeleteBucketPolicyHandler - DELETE Bucket policy
// -----------------
// This implementation of the DELETE operation uses the policy
// subresource to remove a policy from a bucket.
func (api gatewayAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
vars := router.Vars(r)
bucket := vars["bucket"]
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(bucket)
if err != nil {
errorIf(err, "Unable to find bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Delete bucket access policy, by passing an empty policy
// struct.
objAPI.DeleteBucketPolicies(bucket)
// Success.
writeSuccessNoContent(w)
}
// GetBucketPolicyHandler - GET Bucket policy
// -----------------
// This operation uses the policy
// subresource to return the policy of a specified bucket.
func (api gatewayAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
vars := router.Vars(r)
bucket := vars["bucket"]
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(bucket)
if err != nil {
errorIf(err, "Unable to find bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
policies, err := objAPI.GetBucketPolicies(bucket)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, p := range policies {
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, p.Policy, bucket, p.Prefix)
}
policyBytes, err := json.Marshal(&policyInfo)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Write to client.
w.Write(policyBytes)
}
// GetBucketNotificationHandler - This implementation of the GET
// operation uses the notification subresource to return the
// notification configuration of a bucket. If notifications are
// not enabled on the bucket, the operation returns an empty
// NotificationConfiguration element.
func (api gatewayAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
}
// PutBucketNotificationHandler - Minio notification feature enables
// you to receive notifications when certain events happen in your bucket.
// Using this API, you can replace an existing notification configuration.
// The configuration is an XML file that defines the event types that you
// want Minio to publish and the destination where you want Minio to publish
// an event notification when it detects an event of the specified type.
// By default, your bucket has no event notifications configured. That is,
// the notification configuration will be an empty NotificationConfiguration.
func (api gatewayAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
}
// ListenBucketNotificationHandler - list bucket notifications.
func (api gatewayAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
}
// DeleteBucketHandler - Delete bucket
func (api gatewayAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// DeleteBucket does not have any bucket action.
if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
vars := router.Vars(r)
bucket := vars["bucket"]
// Attempt to delete bucket.
if err := objectAPI.DeleteBucket(bucket); err != nil {
errorIf(err, "Unable to delete a bucket.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Write success response.
writeSuccessNoContent(w)
}
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}
// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
listObjects := objectAPI.ListObjects
if reqAuthType == authTypeAnonymous {
listObjects = objectAPI.AnonListObjects
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
// marshalled into S3 compatible XML header.
listObjectsInfo, err := listObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
errorIf(err, "Unable to list objects.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api gatewayAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
return
}
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}
getBucketInfo := objectAPI.GetBucketInfo
if reqAuthType == authTypeAnonymous {
getBucketInfo = objectAPI.AnonGetBucketInfo
}
if _, err := getBucketInfo(bucket); err != nil {
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
return
}
writeSuccessResponseHeadersOnly(w)
}
// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api gatewayAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
// Clients like boto3 send the getBucketLocation() call signed with the region that is configured.
s3Error = isReqAuthenticated(r, serverConfig.GetRegion())
}
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}
getBucketInfo := objectAPI.GetBucketInfo
if reqAuthType == authTypeAnonymous {
getBucketInfo = objectAPI.AnonGetBucketInfo
}
if _, err := getBucketInfo(bucket); err != nil {
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := serverConfig.GetRegion()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
})
}
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}

cmd/gateway-main.go Normal file

@@ -0,0 +1,262 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/url"
"os"
"strings"
"github.com/gorilla/mux"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
var gatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} BACKEND [ENDPOINT]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
BACKEND:
azure: Microsoft Azure Blob Storage. Default ENDPOINT is https://core.windows.net
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of your storage backend.
MINIO_SECRET_KEY: Password or secret key of your storage backend.
EXAMPLES:
1. Start minio gateway server for Azure Blob Storage backend.
$ export MINIO_ACCESS_KEY=azureaccountname
$ export MINIO_SECRET_KEY=azureaccountkey
$ {{.HelpName}} azure
`
var gatewayCmd = cli.Command{
Name: "gateway",
Usage: "Start object storage gateway.",
Action: gatewayMain,
CustomHelpTemplate: gatewayTemplate,
Flags: append(serverFlags,
cli.BoolFlag{
Name: "quiet",
Usage: "Disable startup banner.",
},
),
HideHelpCommand: true,
}
// Represents the type of the gateway backend.
type gatewayBackend string
const (
azureBackend gatewayBackend = "azure"
// Add more backends here.
)
// Returns access and secret keys set from environment variables.
func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
// Fetch access keys from environment variables.
accessKey = os.Getenv("MINIO_ACCESS_KEY")
secretKey = os.Getenv("MINIO_SECRET_KEY")
if accessKey == "" || secretKey == "" {
console.Fatalln("Access and secret keys are mandatory to run Minio gateway server.")
}
return accessKey, secretKey
}
// Initialize gateway layer depending on the backend type.
// Supported backend types are
//
// - Azure Blob Storage.
// - Add your favorite backend here.
func newGatewayLayer(backendType, endPoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
if gatewayBackend(backendType) != azureBackend {
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
}
return newAzureLayer(endPoint, accessKey, secretKey, secure)
}
// Initialize a new gateway config.
//
// DO NOT save this config, this is meant to be
// only used in memory.
func newGatewayConfig(accessKey, secretKey, region string) error {
// Initialize server config.
srvCfg := newServerConfigV14()
// If env is set for a fresh start, save them to config file.
srvCfg.SetCredential(credential{
AccessKey: accessKey,
SecretKey: secretKey,
})
// Set default printing to console.
srvCfg.Logger.SetConsole(consoleLogger{true, "error"})
// Set custom region.
srvCfg.SetRegion(region)
// Create certs path for SSL configuration.
if err := createConfigDir(); err != nil {
return err
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
serverConfigMu.Lock()
serverConfig = srvCfg
serverConfigMu.Unlock()
return nil
}
// Return endpoint.
func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
schemeSpecified := len(strings.Split(arg, "://")) > 1
if !schemeSpecified {
// Default connection will be "secure".
arg = "https://" + arg
}
u, err := url.Parse(arg)
if err != nil {
return "", false, err
}
switch u.Scheme {
case "http":
return u.Host, false, nil
case "https":
return u.Host, true, nil
default:
return "", false, fmt.Errorf("Unrecognized scheme %s", u.Scheme)
}
}
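Concrete behavior of parseGatewayEndpoint for typical arguments, read straight off the code above (illustrative):

```go
// parseGatewayEndpoint("play.minio.io:9000")    -> "play.minio.io:9000", secure=true (https assumed)
// parseGatewayEndpoint("http://localhost:9000") -> "localhost:9000", secure=false
// parseGatewayEndpoint("https://example.com")   -> "example.com", secure=true
// parseGatewayEndpoint("ftp://example.com")     -> error: Unrecognized scheme ftp
```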
// Handler for 'minio gateway'.
func gatewayMain(ctx *cli.Context) {
	if !ctx.Args().Present() || ctx.Args().First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, "gateway", 1)
	}

	// Fetch access and secret keys from env.
	accessKey, secretKey := mustGetGatewayCredsFromEnv()

	// Initialize new gateway config.
	//
	// TODO: add support for custom region when we add
	// support for S3 backend storage, currently this can
	// default to "us-east-1"
	err := newGatewayConfig(accessKey, secretKey, "us-east-1")
	if err != nil {
		console.Fatalf("Unable to initialize gateway config. Error: %s", err)
	}

	// Enable console logging.
	enableConsoleLogger()

	// Get quiet flag from command line argument.
	quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet")

	// First argument is the selected backend type.
	backendType := ctx.Args().First()

	// Second argument is the endpoint. If no endpoint is specified then the
	// gateway implementation should use a default setting.
	endPoint, secure, err := parseGatewayEndpoint(ctx.Args().Get(1))
	if err != nil {
		console.Fatalf("Unable to parse endpoint. Error: %s", err)
	}

	// Create certs path for SSL configuration.
	err = createConfigDir()
	if err != nil {
		console.Fatalf("Unable to create configuration directory. Error: %s", err)
	}

	newObject, err := newGatewayLayer(backendType, endPoint, accessKey, secretKey, secure)
	if err != nil {
		console.Fatalf("Unable to initialize gateway layer. Error: %s", err)
	}

	initNSLock(false) // Enable local namespace lock.

	router := mux.NewRouter().SkipClean(true)
	registerGatewayAPIRouter(router, newObject)

	var handlerFns = []HandlerFunc{
		// Validate all the incoming paths.
		setPathValidityHandler,
		// Limits the size of all requests to a fixed maximum.
		setRequestSizeLimitHandler,
		// Adds 'crossdomain.xml' policy handler to serve legacy flash clients.
		setCrossDomainPolicy,
		// Validates all incoming requests to have a valid date header.
		setTimeValidityHandler,
		// CORS setting for all browser API requests.
		setCorsHandler,
		// Validates all incoming URL resources; for invalid/unsupported
		// resources the client receives an HTTP error.
		setIgnoreResourcesHandler,
		// Auth handler verifies incoming authorization headers and
		// routes them accordingly. Client receives an HTTP error for
		// invalid/unsupported signatures.
		setAuthHandler,
	}

	apiServer := NewServerMux(ctx.String("address"), registerHandlers(router, handlerFns...))

	// Set if we are an SSL enabled S3 gateway.
	globalIsSSL = isSSL()

	// Start server, automatically configures TLS if certs are available.
	go func() {
		cert, key := "", ""
		if globalIsSSL {
			cert, key = getPublicCertFile(), getPrivateKeyFile()
		}
		if aerr := apiServer.ListenAndServe(cert, key); aerr != nil {
			console.Fatalf("Failed to start minio server. Error: %s\n", aerr)
		}
	}()

	apiEndPoints, err := finalizeAPIEndpoints(apiServer.Addr)
	fatalIf(err, "Unable to finalize API endpoints for %s", apiServer.Addr)

	// Once endpoints are finalized, initialize the new object api.
	globalObjLayerMutex.Lock()
	globalObjectAPI = newObject
	globalObjLayerMutex.Unlock()

	// Prints the formatted startup message once object layer is initialized.
	if !quietFlag {
		mode := ""
		if gatewayBackend(backendType) == azureBackend {
			mode = globalMinioModeGatewayAzure
		}
		checkUpdate(mode)
		printGatewayStartupMessage(apiEndPoints, accessKey, secretKey, backendType)
	}

	<-globalServiceDoneCh
}

cmd/gateway-main_test.go

@@ -0,0 +1,50 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import "testing"

// Test parseGatewayEndpoint
func TestParseGatewayEndpoint(t *testing.T) {
	testCases := []struct {
		arg         string
		endPoint    string
		secure      bool
		errReturned bool
	}{
		{"http://127.0.0.1:9000", "127.0.0.1:9000", false, false},
		{"https://127.0.0.1:9000", "127.0.0.1:9000", true, false},
		{"http://play.minio.io:9000", "play.minio.io:9000", false, false},
		{"https://play.minio.io:9000", "play.minio.io:9000", true, false},
		{"ftp://127.0.0.1:9000", "", false, true},
		{"ftp://play.minio.io:9000", "", false, true},
		{"play.minio.io:9000", "play.minio.io:9000", true, false},
	}

	for i, test := range testCases {
		endPoint, secure, err := parseGatewayEndpoint(test.arg)
		errReturned := err != nil
		if endPoint != test.endPoint ||
			secure != test.secure ||
			errReturned != test.errReturned {
			t.Errorf("Test %d: expected %s,%t,%t got %s,%t,%t",
				i+1, test.endPoint, test.secure, test.errReturned,
				endPoint, secure, errReturned)
		}
	}
}

cmd/gateway-router.go

@@ -0,0 +1,122 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"io"

	router "github.com/gorilla/mux"
)
// GatewayLayer - Interface to implement gateway mode.
type GatewayLayer interface {
	ObjectLayer

	AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
	AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)

	SetBucketPolicies(string, []BucketAccessPolicy) error
	GetBucketPolicies(string) ([]BucketAccessPolicy, error)
	DeleteBucketPolicies(string) error

	AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
	AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
}
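
A new backend therefore satisfies GatewayLayer by implementing ObjectLayer plus the anonymous-access and bucket-policy methods above. As a purely illustrative sketch (the type below is hypothetical, not part of this changeset), the anonymous variants can delegate to their authenticated counterparts; a real backend may instead need genuinely unauthenticated calls against its cloud API:

```
// sketchLayer is hypothetical and for illustration only.
type sketchLayer struct {
	ObjectLayer // provides the core, authenticated S3 API surface
}

// AnonGetObject delegates anonymous reads to the authenticated path; the
// remaining Anon* and *BucketPolicies methods would follow the same pattern.
func (l *sketchLayer) AnonGetObject(bucket, object string, startOffset, length int64, writer io.Writer) error {
	return l.GetObject(bucket, object, startOffset, length, writer)
}
```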
// Implements and provides HTTP handlers for the S3 API.
// Overrides GetObject, HeadObject and policy related handlers.
type gatewayAPIHandlers struct {
	objectAPIHandlers
	ObjectAPI func() GatewayLayer
}
// registerGatewayAPIRouter - registers S3 compatible APIs.
func registerGatewayAPIRouter(mux *router.Router, gw GatewayLayer) {
	// Initialize API.
	api := gatewayAPIHandlers{
		ObjectAPI: func() GatewayLayer { return gw },
		objectAPIHandlers: objectAPIHandlers{
			ObjectAPI: newObjectLayerFn,
		},
	}

	// API Router
	apiRouter := mux.NewRoute().PathPrefix("/").Subrouter()

	// Bucket router
	bucket := apiRouter.PathPrefix("/{bucket}").Subrouter()

	/// Object operations

	// HeadObject
	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(api.HeadObjectHandler)
	// CopyObjectPart
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// PutObjectPart
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// ListObjectParts
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
	// CompleteMultipartUpload
	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
	// NewMultipartUpload
	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.NewMultipartUploadHandler).Queries("uploads", "")
	// AbortMultipartUpload
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
	// GetObject
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.GetObjectHandler)
	// CopyObject
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler)
	// PutObject
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectHandler)
	// DeleteObject
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.DeleteObjectHandler)

	/// Bucket operations

	// GetBucketLocation
	bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
	// GetBucketPolicy
	bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
	// GetBucketNotification
	bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
	// ListenBucketNotification
	bucket.Methods("GET").HandlerFunc(api.ListenBucketNotificationHandler).Queries("events", "{events:.*}")
	// ListMultipartUploads
	bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
	// ListObjectsV2
	bucket.Methods("GET").HandlerFunc(api.ListObjectsV2Handler).Queries("list-type", "2")
	// ListObjectsV1 (Legacy)
	bucket.Methods("GET").HandlerFunc(api.ListObjectsV1Handler)
	// PutBucketPolicy
	bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
	// PutBucketNotification
	bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
	// PutBucket
	bucket.Methods("PUT").HandlerFunc(api.PutBucketHandler)
	// HeadBucket
	bucket.Methods("HEAD").HandlerFunc(api.HeadBucketHandler)
	// PostPolicy
	bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler)
	// DeleteMultipleObjects
	bucket.Methods("POST").HandlerFunc(api.DeleteMultipleObjectsHandler)
	// DeleteBucketPolicy
	bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
	// DeleteBucket
	bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketHandler)

	/// Root operation

	// ListBuckets
	apiRouter.Methods("GET").HandlerFunc(api.ListBucketsHandler)
}
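
Note that registration order matters here: gorilla/mux dispatches to the first route that matches, so the query-constrained multipart routes must be registered before the generic GetObject/PutObject routes or they would never be reached. A self-contained sketch demonstrating that precedence (handler bodies are illustrative):

```
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Registered first: only matches GET requests carrying ?uploadId=...
	r.Methods("GET").Path("/{object:.+}").
		Queries("uploadId", "{uploadId:.*}").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "ListObjectParts") })
	// Registered second: catches every other GET on an object path.
	r.Methods("GET").Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "GetObject") })

	for _, target := range []string{"/key?uploadId=abc", "/key"} {
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, httptest.NewRequest("GET", target, nil))
		fmt.Println(target, "->", rec.Body.String()) // first route wins when the query is present
	}
}
```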


@@ -0,0 +1,67 @@
/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"fmt"
	"runtime"
	"strings"

	"github.com/minio/mc/pkg/console"
)
// Prints the formatted startup message.
func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey, backendType string) {
	// Prints credential.
	printGatewayCommonMsg(apiEndPoints, accessKey, secretKey)

	// Prints `mc` cli configuration message; chooses the
	// first endpoint as default.
	endPoint := apiEndPoints[0]

	// Configure 'mc'; the following block prints platform-specific information for the Minio client.
	console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
	if runtime.GOOS == globalWindowsOSName {
		mcMessage := fmt.Sprintf("$ mc.exe config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
		console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
	} else {
		mcMessage := fmt.Sprintf("$ mc config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
		console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
	}

	// Prints documentation message.
	printObjectAPIMsg()

	// If SSL is configured, read the certificate chain and print
	// the authority and expiry.
	if globalIsSSL {
		certs, err := readCertificateChain()
		if err != nil {
			console.Fatalf("Unable to read certificate chain. Error: %s", err)
		}
		printCertificateMsg(certs)
	}
}
// Prints common startup message: endpoint and credentials.
func printGatewayCommonMsg(apiEndpoints []string, accessKey, secretKey string) {
	apiEndpointStr := strings.Join(apiEndpoints, " ")

	// Colorize the message and print.
	console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
	console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", accessKey)))
	console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey)))
}
