Compare commits: RELEASE.20...RELEASE.20

271 Commits
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 2fa98b1d6a |  |
|  | 99a4298938 |  |
|  | 069badc7e9 |  |
|  | c601cb2f1e |  |
|  | 9ca7470ccc |  |
|  | b3ca304c01 |  |
|  | 60f52f461f |  |
|  | e5fb6294a7 |  |
|  | a15bb19d37 |  |
|  | 7bf1caa0fe |  |
|  | 6e7962bf35 |  |
|  | 825e29f301 |  |
|  | cebeca3075 |  |
|  | 26640033b0 |  |
|  | bbb56739bd |  |
|  | 1cd801b2e9 |  |
|  | bf9b619d86 |  |
|  | bc79b435a2 |  |
|  | 0ed6daab59 |  |
|  | e96f19c867 |  |
|  | aaef18b1a3 |  |
|  | a48a034e5a |  |
|  | bf8ec8ad73 |  |
|  | 8ce424bacd |  |
|  | cea3e3f7a6 |  |
|  | af36c92cab |  |
|  | 1b258da108 |  |
|  | 35427a017d |  |
|  | a0715f3b59 |  |
|  | 5a28ef0d47 |  |
|  | 3385bf3da8 |  |
|  | 1ce8d2c476 |  |
|  | a8296445ad |  |
|  | 43c72374d4 |  |
|  | d759a7ce99 |  |
|  | 5d2b5ee6a9 |  |
|  | 9ee619ea14 |  |
|  | 55b385beee |  |
|  | 930943f058 |  |
|  | e6d8e272ce |  |
|  | b52b90412b |  |
|  | 843f481eb3 |  |
|  | f6d0645a3c |  |
|  | 414a7eca83 |  |
|  | 5cd9f10a02 |  |
|  | b976521c83 |  |
|  | 2c3b1f01d9 |  |
|  | a6f4cf61f2 |  |
|  | dfa8835720 |  |
|  | c5ac901e8d |  |
|  | 4101d4917c |  |
|  | d966d29fed |  |
|  | c301f5882d |  |
|  | 123cccaed1 |  |
|  | cbd02c58be |  |
|  | c71895f225 |  |
|  | b83413b167 |  |
|  | 63e0a81760 |  |
|  | 8d47ef503c |  |
|  | 54eded2e6f |  |
|  | 94c88890b8 |  |
|  | e871e27562 |  |
|  | 007a52b546 |  |
|  | efb8b00db0 |  |
|  | d744865dc6 |  |
|  | e40c29e834 |  |
|  | b0cea1c0f3 |  |
|  | 6f2b4675fa |  |
|  | a4ce1daf99 |  |
|  | 7bdaf9bc50 |  |
|  | 55d4eee6f1 |  |
|  | ac82798d0a |  |
|  | 5b71c21330 |  |
|  | 3e3fbdf8e6 |  |
|  | c9349747ca |  |
|  | 2b9b907f9c |  |
|  | 9389a55e5d |  |
|  | 87e6533cf3 |  |
|  | 38bc3a45db |  |
|  | c5faba55c1 |  |
|  | 8b5e6e338c |  |
|  | 0373a1699b |  |
|  | 559a59220e |  |
|  | 59e1763816 |  |
|  | 041a812ba0 |  |
|  | fbfc9a61ec |  |
|  | be9baa1464 |  |
|  | b058e32348 |  |
|  | ea66a52ed1 |  |
|  | 55dd017e62 |  |
|  | 12353caf35 |  |
|  | a57c747667 |  |
|  | 28661c0413 |  |
|  | 04a152be12 |  |
|  | bce3f8237d |  |
|  | 16a45e5aff |  |
|  | 000a60f238 |  |
|  | bf278ca36f |  |
|  | 97f2bc26b9 |  |
|  | bba562235b |  |
|  | 2337e5f803 |  |
|  | ffd7b7059c |  |
|  | 5c0acbc6fc |  |
|  | 5a52bc7ff6 |  |
|  | 045e1fed2b |  |
|  | a861d38532 |  |
|  | 20a15567b8 |  |
|  | 94f67ad224 |  |
|  | 36ee110563 |  |
|  | 1dc25bcf5f |  |
|  | 2d96745156 |  |
|  | 9d49688c87 |  |
|  | 8e09374cb8 |  |
|  | 58d90ed73c |  |
|  | e857b6741d |  |
|  | 0505ef83b5 |  |
|  | 22bc15d89b |  |
|  | a2e904b966 |  |
|  | cc7dc61eb4 |  |
|  | c4f480a839 |  |
|  | 60831e3299 |  |
|  | 037319066f |  |
|  | bb871a7c31 |  |
|  | 0ebbd3caef |  |
|  | bd56f80250 |  |
|  | a39e810965 |  |
|  | 09103991ea |  |
|  | c43f745449 |  |
|  | 9610a74c19 |  |
|  | 0bcd8abc5c |  |
|  | 70b350c383 |  |
|  | 338e9a9be9 |  |
|  | edbd8709ec |  |
|  | 5db60a6c59 |  |
|  | 36c19f1d65 |  |
|  | d3f9f8be88 |  |
|  | 183ec094c4 |  |
|  | c1d2b3d5c3 |  |
|  | be72609d1f |  |
|  | 48f2c98052 |  |
|  | 286c663495 |  |
|  | 941fed8e4a |  |
|  | 55092bede1 |  |
|  | 90a3b830f4 |  |
|  | 23b9df0694 |  |
|  | 48cb271a46 |  |
|  | 61229b38f7 |  |
|  | 90ca73af13 |  |
|  | a04b6561a0 |  |
|  | 219d841496 |  |
|  | baef49b4a2 |  |
|  | 3022f60561 |  |
|  | 680fdf6f90 |  |
|  | 1af6e8cb72 |  |
|  | 98d3913a1e |  |
|  | 35c38e4bd8 |  |
|  | e43d3a075c |  |
|  | 43e0ef4248 |  |
|  | cd7d5b59e5 |  |
|  | 299ef9b188 |  |
|  | b30c436715 |  |
|  | 7001fe407f |  |
|  | 59f7266081 |  |
|  | 99bf4d0c42 |  |
|  | 7b8beecc81 |  |
|  | 510ec153b9 |  |
|  | e29a37e95c |  |
|  | 4a4048fe27 |  |
|  | da2887f914 |  |
|  | c22439c82e |  |
|  | 38224a4c1a |  |
|  | b4ab778cb2 |  |
|  | 22f6756ce6 |  |
|  | 0394a8f013 |  |
|  | 1039311184 |  |
|  | a075015293 |  |
|  | d90d4841b8 |  |
|  | d3a2efbf91 |  |
|  | 002a205c9c |  |
|  | 91ceae23d0 |  |
|  | 1008c2c069 |  |
|  | 7abadfccc2 |  |
|  | 97090aa16c |  |
|  | fb531235de |  |
|  | cb1566c6e6 |  |
|  | 6d89435356 |  |
|  | 975237cbf8 |  |
|  | 501af06001 |  |
|  | a69f74533c |  |
|  | a19cf063b5 |  |
|  | 8b0c86298d |  |
|  | df19546cc8 |  |
|  | ff7799452b |  |
|  | 97be455f63 |  |
|  | d16a409943 |  |
|  | 74efbb4153 |  |
|  | 0cfd5a21ba |  |
|  | 1ce2d29bbb |  |
|  | 48ffb7731a |  |
|  | 1cfd4a48d9 |  |
|  | 993a79d9c6 |  |
|  | 0c16b1c9a7 |  |
|  | 900cc27b51 |  |
|  | 8528017ad3 |  |
|  | a73da7755e |  |
|  | 2c0b3cadfc |  |
|  | 763fce909b |  |
|  | da8214845a |  |
|  | 74e2fe0879 |  |
|  | cb7f9ba286 |  |
|  | 7906f464fd |  |
|  | 7e4c9a9e1e |  |
|  | f6fd407e47 |  |
|  | 46ced81f41 |  |
|  | 7a02faab72 |  |
|  | e252114f06 |  |
|  | 39b3e4f9b3 |  |
|  | 158b8c2e86 |  |
|  | c4c79f61ce |  |
|  | 59e1d94770 |  |
|  | 59e847aebe |  |
|  | a13b58f630 |  |
|  | 9e7a19d6b9 |  |
|  | 16c648b109 |  |
|  | c871456269 |  |
|  | 55aa20595f |  |
|  | 707ed2b302 |  |
|  | 78be3f8947 |  |
|  | 67d508b214 |  |
|  | 0022c9d210 |  |
|  | 9b4a81ee60 |  |
|  | b3f22eac56 |  |
|  | a343d14f19 |  |
|  | 9c90a28546 |  |
|  | d9a7f80f68 |  |
|  | 502456db91 |  |
|  | 3eb7a8bde8 |  |
|  | ac3b59645e |  |
|  | 72929ec05b |  |
|  | 757c639044 |  |
|  | 589df3d5e7 |  |
|  | a71e08efb4 |  |
|  | c422f7f412 |  |
|  | 08b9244c48 |  |
|  | efbc665ad3 |  |
|  | 998f01fadc |  |
|  | 4b858b562a |  |
|  | 033f3a4d51 |  |
|  | cf2a436bc8 |  |
|  | 64998fc4ab |  |
|  | 43be00ea1b |  |
|  | 3517f64d20 |  |
|  | c5f26d5cdd |  |
|  | c113d4e49c |  |
|  | a436f2baa5 |  |
|  | ba76cd3268 |  |
|  | 781012517d |  |
|  | 091b9b661f |  |
|  | af6c6a2b35 |  |
|  | b93ef73f9b |  |
|  | 83ca1a8d64 |  |
|  | 27ef1262bf |  |
|  | ae002aa724 |  |
|  | a3ec71bc28 |  |
|  | ab711fe1a2 |  |
|  | 35d19a4ae2 |  |
|  | f767a2538a |  |
|  | 0c75395abe |  |
|  | 188cf1d5ce |  |
|  | d96584ef58 |  |
|  | d42496cc74 |  |
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 46 lines)

@@ -0,0 +1,46 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Operating System and version (`uname -a`):
* Link to your project:

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: community, triage
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

.github/PULL_REQUEST_TEMPLATE.md (vendored, 28 changes)

@@ -1,33 +1,19 @@
<!--- Provide a general summary of your changes in the Title above -->

## Description
<!--- Describe your changes in detail -->


## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

## Regression
<!-- Is this PR fixing a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->
<!--- see how your change affects other areas of the code, etc. -->
## How to test this PR?


## Types of changes
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)

## Checklist:
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [ ] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
- [ ] I have added unit tests to cover my changes.
- [ ] I have added/updated functional tests in [mint](https://github.com/minio/mint). (If yes, add `mint` PR # here: )
- [ ] All new and existing tests passed.
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Functional tests needed (If yes, add [mint](https://github.com/minio/mint) PR # here: )

.gitignore (vendored, 2 changes)

@@ -9,7 +9,6 @@ site/
/.idea/
/Minio.iml
**/access.log
build/
vendor/**/*.js
vendor/**/*.json
release
@@ -23,4 +22,3 @@ prime/
stage/
.sia_temp/
config.json
healthcheck

.travis.yml (16 changes)

@@ -2,6 +2,14 @@ go_import_path: github.com/minio/minio

language: go

addons:
apt:
packages:
- shellcheck

services:
- docker

# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
@@ -18,6 +26,7 @@ matrix:
- ARCH=x86_64
- CGO_ENABLED=0
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
# Enable build cache
# https://restic.net/blog/2018-09-02/travis-build-cache
cache:
@@ -26,7 +35,7 @@ matrix:
- $HOME/gopath/pkg/mod
- $HOME/go/pkg/mod

go: 1.12.1
go: 1.12.5
script:
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
@@ -37,12 +46,15 @@ matrix:
- make verify
- make coverage
- cd browser && yarn && yarn test && cd ..
- bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

- os: windows
env:
- ARCH=x86_64
- CGO_ENABLED=0
- GO111MODULE=on
go: 1.12.1
- GOPROXY=https://proxy.golang.org
go: 1.12.5
script:
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
- bash buildscripts/go-coverage.sh

@@ -1,4 +1,4 @@
# MinIO Contribution Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/) [](https://codecov.io/gh/minio/minio)
# MinIO Contribution Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)

``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.

@@ -37,7 +37,7 @@ After your code changes, make sure
- To add test cases for the new code. If you have questions about how to do it, please ask on our [Slack](https://slack.min.io) channel.
- To run `make verifiers`
- To squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- To run `go test -race ./...` and `go build` completes.
- To run `make test` and `make build` completes.

### Commit changes
After verification, commit your changes. This is a [great post](https://chris.beams.io/posts/git-commit/) on how to write useful commit messages

Dockerfile (11 changes)

@@ -9,29 +9,26 @@ ENV GO111MODULE on
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
cd dockerscripts; go build -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.9

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key

EXPOSE 9000

COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]

@@ -2,19 +2,19 @@ FROM alpine:3.9

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck /usr/bin/
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck
chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000

@@ -22,6 +22,4 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]

Dockerfile.mint (new file, 23 lines)

@@ -0,0 +1,23 @@
FROM ubuntu:16.04

ENV DEBIAN_FRONTEND noninteractive

ENV LANG C.UTF-8

ENV GOROOT /usr/local/go

ENV GOPATH /usr/local

ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

ENV MINT_ROOT_DIR /mint

COPY mint /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
apt-get --yes --quiet install wget jq curl git dnsmasq && \
cd /mint && /mint/release.sh

WORKDIR /mint

ENTRYPOINT ["/mint/entrypoint.sh"]

@@ -6,27 +6,25 @@ ENV GO111MODULE on

RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio/dockerscripts && \
go build -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go
git clone https://github.com/minio/minio

FROM alpine:3.9

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck
chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000

@@ -34,6 +32,4 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]

@@ -8,6 +8,7 @@ WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
ENV GO111MODULE=on
ENV GOPROXY=https://proxy.golang.org

RUN git config --global http.cookiefile /gitcookie/.gitcookie

@@ -33,7 +34,7 @@ USER ci
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 20m "$d"; done
RUN make verifiers
RUN make crosscompile
RUN make verify
@@ -56,15 +57,17 @@ FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
COPY mint /mint

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV MINT_ROOT_DIR /mint

RUN apt-get --yes update && apt-get --yes upgrade && apt-get --yes --quiet install wget jq curl git dnsmasq && \
git clone https://github.com/minio/mint.git /mint && \
RUN apt-get --yes update && apt-get --yes upgrade && \
apt-get --yes --quiet install wget jq curl git dnsmasq && \
cd /mint && /mint/release.sh

WORKDIR /mint

Makefile (41 changes)

@@ -2,6 +2,12 @@ PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)

GOOS := $(shell go env GOOS)
GOOSALT ?= 'linux'
ifeq ($(GOOS),'darwin')
GOOSALT = 'mac'
endif

TAG ?= $(USER)
BUILD_LDFLAGS := '$(LDFLAGS)'

@@ -14,8 +20,8 @@ checks:
getdeps:
@mkdir -p ${GOPATH}/bin
@which golint 1>/dev/null || (echo "Installing golint" && go get -u golang.org/x/lint/golint)
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet -O ${GOPATH}/bin/staticcheck https://github.com/dominikh/go-tools/releases/download/2019.1/staticcheck_linux_amd64 && chmod +x ${GOPATH}/bin/staticcheck)
@which misspell 1>/dev/null || (echo "Installing misspell" && wget --quiet https://github.com/client9/misspell/releases/download/v0.3.4/misspell_0.3.4_linux_64bit.tar.gz && tar xf misspell_0.3.4_linux_64bit.tar.gz && mv misspell ${GOPATH}/bin/misspell && chmod +x ${GOPATH}/bin/misspell && rm -f misspell_0.3.4_linux_64bit.tar.gz)
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet -O ${GOPATH}/bin/staticcheck https://github.com/dominikh/go-tools/releases/download/2019.1/staticcheck_${GOOS}_amd64 && chmod +x ${GOPATH}/bin/staticcheck)
@which misspell 1>/dev/null || (echo "Installing misspell" && wget --quiet https://github.com/client9/misspell/releases/download/v0.3.4/misspell_0.3.4_${GOOSALT}_64bit.tar.gz && tar xf misspell_0.3.4_${GOOSALT}_64bit.tar.gz && mv misspell ${GOPATH}/bin/misspell && chmod +x ${GOPATH}/bin/misspell && rm -f misspell_0.3.4_${GOOSALT}_64bit.tar.gz)

crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)

@@ -24,35 +30,35 @@ verifiers: getdeps vet fmt lint staticcheck spelling

vet:
@echo "Running $@"
@GO111MODULE=on go vet github.com/minio/minio/...
@GOPROXY=https://proxy.golang.org GO111MODULE=on go vet github.com/minio/minio/...

fmt:
@echo "Running $@"
@GO111MODULE=on gofmt -d cmd/
@GO111MODULE=on gofmt -d pkg/
@GOPROXY=https://proxy.golang.org GO111MODULE=on gofmt -d cmd/
@GOPROXY=https://proxy.golang.org GO111MODULE=on gofmt -d pkg/

lint:
@echo "Running $@"
@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...

staticcheck:
@echo "Running $@"
@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

spelling:
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`

# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
@echo "Running unit tests"
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
@GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null

verify: build
@echo "Verifying build"

@@ -65,8 +71,7 @@ coverage: build
# Builds minio locally.
build: checks
@echo "Building minio binary to './minio'"
@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null
@GOPROXY=https://proxy.golang.org GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null

docker: build
@docker build -t $(TAG) . -f Dockerfile.dev
@@ -74,7 +79,7 @@ docker: build
# Builds minio and installs it to $GOPATH/bin.
install: build
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -uf $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@echo "Installation successful. To learn more, try \"minio --help\"."

clean:

README.md (11 changes)

@@ -1,10 +1,15 @@
# MinIO Quickstart Guide
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/) [](https://codecov.io/gh/minio/minio)
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)

MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.
MinIO is an object storage server released under Apache License v2.0. It is compatible[1] with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.

MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL.

[1]: MinIO in its default mode is faster and does not calculate MD5Sum unless passed by client. This may lead to incompatibility with few S3 clients like s3ql that heavily depend on MD5Sum. For full compatibility with Amazon S3 API, start MinIO with `--compat` option.
```sh
minio --compat server /data
```

## Docker Container
### Stable
```
@@ -86,7 +91,7 @@ service minio start
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.12](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
GOPROXY=https://proxy.golang.org GO111MODULE=on go get github.com/minio/minio
```

## Allow port access for Firewalls

@@ -1,4 +1,4 @@
# MinIO Quickstart Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/) [](https://codecov.io/gh/minio/minio)
# MinIO Quickstart Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)

MinIO is an object storage service released under the Apache License v2.0 open-source license. It is compatible with the Amazon S3 cloud storage service interface and is well suited for storing large volumes of unstructured data such as images, videos, log files, backup data, and container/VM images; a single object can be of any size, from a few KBs up to a maximum of 5TB.

@@ -75,7 +75,7 @@

## Install from Source

Installing from source is intended only for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://docs.min.io/docs/how-to-install-golang).
Installing from source is intended only for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install).

```sh
go get -u github.com/minio/minio

SECURITY.md (new file, 41 lines)

@@ -0,0 +1,41 @@
# Security Policy

## Supported Versions

We always provide security updates for the [latest release](https://github.com/minio/minio/releases/latest).
Whenever there is a security update you just need to upgrade to the latest version.

## Reporting a Vulnerability

All security bugs in [minio/minio](https://github.com/minio/minio) (or other minio/* repositories)
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
in handling your report.

Please provide a detailed explanation of the issue. In particular, outline the type of the security
issue (DoS, authentication bypass, information disclosure, ...) and the assumptions you're making (e.g. do
you need access credentials for a successful exploit).

If you have not received a reply to your email within 48 hours or you have not heard from the security team
for the past five days please contact the security team directly:
- Primary security coordinator: aead@min.io
- Secondary coordinator: harsha@min.io
- If you receive no response: dev@min.io

### Disclosure Process

MinIO uses the following disclosure process:

1. Once the security report is received one member of the security team tries to verify and reproduce
the issue and determines the impact it has.
2. A member of the security team will respond and either confirm or reject the security report.
If the report is rejected the response explains why.
3. Code is audited to find any potential similar problems.
4. Fixes are prepared for the latest release.
5. On the date that the fixes are applied a security advisory will be published on https://blog.min.io.
Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing
the security issue. By default MinIO will **not** publish this information to protect your privacy.

This process can take some time, especially when coordination is required with maintainers of other projects.
Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
follow the process described above to ensure that disclosures are handled consistently.

@@ -4,19 +4,20 @@

## Installation

### Install yarn
### Install node
```sh
curl -o- -L https://yarnpkg.com/install.sh | bash
yarn
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
exec -l $SHELL
nvm install stable
```

### Install `go-bindata` and `go-bindata-assetfs`

If you do not have a working Golang environment, please follow [Install Golang](https://docs.min.io/docs/how-to-install-golang)
If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)

```sh
go get github.com/jteeuwen/go-bindata/...
go get github.com/elazarl/go-bindata-assetfs/...
go get github.com/go-bindata/go-bindata/go-bindata
go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
```

## Generating Assets
@@ -24,7 +25,7 @@ go get github.com/elazarl/go-bindata-assetfs/...
### Generate ui-assets.go

```sh
yarn release
npm run release
```

This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
@@ -32,7 +33,7 @@ This generates ui-assets.go in the current directory. Now do `make` in the paren
### Run MinIO Browser with live reload

```sh
yarn dev
npm run dev
```

Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application
@@ -66,7 +67,7 @@ index 3ccdaba..9496c56 100644
```

```sh
yarn dev
npm run dev
```

Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application

@@ -23,7 +23,7 @@
<!--[if lt IE 11]>
<div class="ie-warning">
<div class="iw-inner">
<i class="iwi-icon fa fa-warning"></i>
<i class="iwi-icon fas fa-exclamation-triangle"></i>

You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser

@@ -16,7 +16,7 @@

import "babel-polyfill"
import "./less/main.less"
import "font-awesome/css/font-awesome.css"
import "@fortawesome/fontawesome-free/css/all.css"
import "material-design-iconic-font/dist/css/material-design-iconic-font.min.css"

import React from "react"

@@ -84,32 +84,32 @@ export class BrowserDropdown extends React.Component {
<li>
<Dropdown pullRight id="top-right-menu">
<Dropdown.Toggle noCaret>
<i className="fa fa-reorder" />
<i className="fas fa-bars" />
</Dropdown.Toggle>
<Dropdown.Menu className="dropdown-menu-right">
<li>
<a target="_blank" href="https://github.com/minio/minio">
GitHub <i className="fa fa-github" />
GitHub <i className="fab fa-github" />
</a>
</li>
<li>
<a href="" onClick={this.fullScreen}>
Fullscreen <i className="fa fa-expand" />
Fullscreen <i className="fas fa-expand" />
</a>
</li>
<li>
<a target="_blank" href="https://docs.min.io/">
Documentation <i className="fa fa-book" />
Documentation <i className="fas fa-book" />
</a>
</li>
<li>
<a target="_blank" href="https://slack.min.io">
Ask for help <i className="fa fa-question-circle" />
Ask for help <i className="fas fa-question-circle" />
</a>
</li>
<li>
<a href="" id="show-about" onClick={this.showAbout.bind(this)}>
About <i className="fa fa-info-circle" />
About <i className="fas fa-info-circle" />
</a>
{this.state.showAboutModal && (
<AboutModal
@@ -120,7 +120,7 @@ export class BrowserDropdown extends React.Component {
</li>
<li>
<a href="" onClick={this.showChangePassword.bind(this)}>
Change Password <i className="fa fa-cog" />
Change Password <i className="fas fa-cog" />
</a>
{this.state.showChangePasswordModal && (
<ChangePasswordModal
@@ -131,7 +131,7 @@ export class BrowserDropdown extends React.Component {
</li>
<li>
<a href="" id="logout" onClick={this.logout}>
Sign Out <i className="fa fa-sign-out" />
Sign Out <i className="fas fa-sign-out-alt" />
</a>
</li>
</Dropdown.Menu>

@@ -18,157 +18,237 @@ import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import web from "../web"
|
||||
import * as alertActions from "../alert/actions"
|
||||
import { getRandomAccessKey, getRandomSecretKey } from "../utils"
|
||||
import jwtDecode from "jwt-decode"
|
||||
import classNames from "classnames"
|
||||
|
||||
import {
|
||||
Tooltip,
|
||||
Modal,
|
||||
ModalBody,
|
||||
ModalHeader,
|
||||
OverlayTrigger
|
||||
} from "react-bootstrap"
|
||||
import { Modal, ModalBody, ModalHeader } from "react-bootstrap"
|
||||
import InputGroup from "./InputGroup"
|
||||
import { ACCESS_KEY_MIN_LENGTH, SECRET_KEY_MIN_LENGTH } from "../constants"
|
||||
|
||||
export class ChangePasswordModal extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
accessKey: "",
|
||||
secretKey: "",
|
||||
keysReadOnly: false
|
||||
currentAccessKey: "",
|
||||
currentSecretKey: "",
|
||||
currentSecretKeyVisible: false,
|
||||
newAccessKey: "",
|
||||
newSecretKey: "",
|
||||
newSecretKeyVisible: false
|
||||
}
|
||||
}
|
||||
// When its shown, it loads the access key and secret key.
|
||||
// When its shown, it loads the access key from JWT token
|
||||
componentWillMount() {
|
||||
const { serverInfo } = this.props
|
||||
|
||||
// Check environment variables first.
|
||||
if (serverInfo.info.isEnvCreds || serverInfo.info.isWorm) {
|
||||
this.setState({
|
||||
accessKey: "xxxxxxxxx",
|
||||
secretKey: "xxxxxxxxx",
|
||||
keysReadOnly: true
|
||||
})
|
||||
} else {
|
||||
web.GetAuth().then(data => {
|
||||
this.setState({
|
||||
accessKey: data.accessKey,
|
||||
secretKey: data.secretKey
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Handle field changes from inside the modal.
|
||||
accessKeyChange(e) {
|
||||
const token = jwtDecode(web.GetToken())
|
||||
this.setState({
|
||||
accessKey: e.target.value
|
||||
})
|
||||
}
|
||||
|
||||
secretKeyChange(e) {
|
||||
this.setState({
|
||||
secretKey: e.target.value
|
||||
})
|
||||
}
|
||||
|
||||
secretKeyVisible(secretKeyVisible) {
|
||||
this.setState({
|
||||
secretKeyVisible
|
||||
currentAccessKey: token.sub,
|
||||
newAccessKey: token.sub
|
||||
})
|
||||
}
|
||||
|
||||
// Save the auth params and set them.
|
||||
setAuth(e) {
|
||||
const { showAlert } = this.props
|
||||
const accessKey = this.state.accessKey
|
||||
const secretKey = this.state.secretKey
|
||||
web
|
||||
.SetAuth({
|
||||
accessKey,
|
||||
secretKey
|
||||
})
|
||||
.then(data => {
|
||||
showAlert({
|
||||
type: "success",
|
||||
message: "Changed credentials"
|
||||
|
||||
if (this.canUpdateCredentials()) {
|
||||
const currentAccessKey = this.state.currentAccessKey
|
||||
const currentSecretKey = this.state.currentSecretKey
|
||||
const newAccessKey = this.state.newAccessKey
|
||||
const newSecretKey = this.state.newSecretKey
|
||||
web
|
||||
.SetAuth({
|
||||
currentAccessKey,
|
||||
currentSecretKey,
|
||||
newAccessKey,
|
||||
newSecretKey
|
||||
})
|
||||
})
|
||||
.catch(err => {
|
||||
showAlert({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
.then(data => {
|
||||
showAlert({
|
||||
type: "success",
|
||||
message: "Credentials updated successfully."
|
||||
})
|
||||
})
|
||||
})
|
||||
.catch(err => {
|
||||
showAlert({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
generateAuth(e) {
|
||||
web.GenerateAuth().then(data => {
|
||||
const { serverInfo } = this.props
|
||||
// Generate random access key only for root user
|
||||
if (!serverInfo.userInfo.isIAMUser) {
|
||||
this.setState({
|
||||
accessKey: data.accessKey,
|
||||
secretKey: data.secretKey,
|
||||
secretKeyVisible: true
|
||||
newAccessKey: getRandomAccessKey()
|
||||
})
|
||||
}
|
||||
|
||||
this.setState({
|
||||
newSecretKey: getRandomSecretKey(),
|
||||
newSecretKeyVisible: true
|
||||
})
|
||||
}
|
||||
|
||||
canChangePassword() {
|
||||
const { serverInfo } = this.props
|
||||
// Password change is not allowed in WORM mode
|
||||
if (serverInfo.info.isWorm) {
|
||||
return false
|
||||
}
|
||||
|
||||
// When credentials are set on ENV, password change not allowed for owner
|
||||
if (serverInfo.info.isEnvCreds && !serverInfo.userInfo.isIAMUser) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
canUpdateCredentials() {
|
||||
return (
|
||||
this.state.currentAccessKey.length > 0 &&
|
||||
this.state.currentSecretKey.length > 0 &&
|
||||
this.state.newAccessKey.length >= ACCESS_KEY_MIN_LENGTH &&
|
||||
this.state.newSecretKey.length >= SECRET_KEY_MIN_LENGTH
|
||||
)
|
||||
}
|
||||
|
||||
render() {
|
||||
const { hideChangePassword } = this.props
|
||||
const { hideChangePassword, serverInfo } = this.props
|
||||
const allowChangePassword = this.canChangePassword()
|
||||
|
||||
if (!allowChangePassword) {
|
||||
return (
|
||||
<Modal bsSize="sm" animation={false} show={true}>
|
||||
<ModalHeader>Change Password</ModalHeader>
|
||||
<ModalBody>
|
||||
Credentials of this user cannot be updated through MinIO Browser.
|
||||
</ModalBody>
|
||||
<div className="modal-footer">
|
||||
<button
|
||||
id="cancel-change-password"
|
||||
className="btn btn-link"
|
||||
onClick={hideChangePassword}
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</Modal>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Modal bsSize="sm" animation={false} show={true}>
|
||||
<ModalHeader>Change Password</ModalHeader>
|
||||
<ModalBody className="m-t-20">
|
||||
<InputGroup
|
||||
value={this.state.accessKey}
|
||||
onChange={this.accessKeyChange.bind(this)}
|
||||
id="accessKey"
|
||||
label="Access Key"
|
||||
name="accesskey"
|
||||
type="text"
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
readonly={this.state.keysReadOnly}
|
||||
/>
|
||||
<i
|
||||
onClick={this.secretKeyVisible.bind(
|
||||
this,
|
||||
!this.state.secretKeyVisible
|
||||
<div className="has-toggle-password">
|
||||
<InputGroup
|
||||
value={this.state.currentAccessKey}
|
||||
id="currentAccessKey"
|
||||
label="Current Access Key"
|
||||
name="currentAccesskey"
|
||||
type="text"
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
readonly={true}
|
||||
/>
|
||||
|
||||
<i
|
||||
onClick={() => {
|
||||
this.setState({
|
||||
currentSecretKeyVisible: !this.state.currentSecretKeyVisible
|
||||
})
|
||||
}}
|
||||
className={
|
||||
"toggle-password fas fa-eye " +
|
||||
(this.state.currentSecretKeyVisible ? "toggled" : "")
|
||||
}
|
||||
/>
|
||||
<InputGroup
|
||||
value={this.state.currentSecretKey}
|
||||
onChange={e => {
|
||||
this.setState({ currentSecretKey: e.target.value })
|
||||
}}
|
||||
id="currentSecretKey"
|
||||
label="Current Secret Key"
|
||||
name="currentSecretKey"
|
||||
type={this.state.currentSecretKeyVisible ? "text" : "password"}
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="has-toggle-password m-t-30">
|
||||
{!serverInfo.userInfo.isIAMUser && (
|
||||
<InputGroup
|
||||
value={this.state.newAccessKey}
|
||||
id="newAccessKey"
|
||||
label={"New Access Key"}
|
||||
name="newAccesskey"
|
||||
type="text"
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
onChange={e => {
|
||||
this.setState({ newAccessKey: e.target.value })
|
||||
}}
|
||||
readonly={serverInfo.userInfo.isIAMUser}
|
||||
/>
|
||||
)}
|
||||
className={
|
||||
"toggle-password fa fa-eye " +
|
||||
(this.state.secretKeyVisible ? "toggled" : "")
|
||||
}
|
||||
/>
|
||||
<InputGroup
|
||||
value={this.state.secretKey}
|
||||
onChange={this.secretKeyChange.bind(this)}
|
||||
id="secretKey"
|
||||
label="Secret Key"
|
||||
name="accesskey"
|
||||
type={this.state.secretKeyVisible ? "text" : "password"}
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
readonly={this.state.keysReadOnly}
|
||||
/>
|
||||
|
||||
<i
|
||||
onClick={() => {
|
||||
this.setState({
|
||||
newSecretKeyVisible: !this.state.newSecretKeyVisible
|
||||
})
|
||||
}}
|
||||
className={
|
||||
"toggle-password fas fa-eye " +
|
||||
(this.state.newSecretKeyVisible ? "toggled" : "")
|
||||
}
|
||||
/>
|
||||
<InputGroup
|
||||
value={this.state.newSecretKey}
|
||||
onChange={e => {
|
||||
this.setState({ newSecretKey: e.target.value })
|
||||
}}
|
||||
id="newSecretKey"
|
||||
label="New Secret Key"
|
||||
name="newSecretKey"
|
||||
type={this.state.newSecretKeyVisible ? "text" : "password"}
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
onChange={e => {
|
||||
this.setState({ newSecretKey: e.target.value })
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</ModalBody>
|
||||
<div className="modal-footer">
|
||||
<button
|
||||
id="generate-keys"
|
||||
className={
|
||||
"btn btn-primary " + (this.state.keysReadOnly ? "hidden" : "")
|
||||
}
|
||||
className={"btn btn-primary"}
|
||||
onClick={this.generateAuth.bind(this)}
|
||||
>
|
||||
Generate
|
||||
</button>
|
||||
<button
|
||||
id="update-keys"
|
||||
className={
|
||||
"btn btn-success " + (this.state.keysReadOnly ? "hidden" : "")
|
||||
}
|
||||
className={classNames({
|
||||
btn: true,
|
||||
"btn-success": this.canUpdateCredentials()
|
||||
})}
|
||||
disabled={!this.canUpdateCredentials()}
|
||||
onClick={this.setAuth.bind(this)}
|
||||
>
|
||||
Update
|
||||
@@ -198,4 +278,7 @@ const mapDispatchToProps = dispatch => {
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(mapStateToProps, mapDispatchToProps)(ChangePasswordModal)
|
||||
export default connect(
|
||||
mapStateToProps,
|
||||
mapDispatchToProps
|
||||
)(ChangePasswordModal)
|
||||
|
||||
@@ -18,7 +18,7 @@ import React from "react"
|
||||
|
||||
export const Host = () => (
|
||||
<div className="fes-host">
|
||||
<i className="fa fa-globe" />
|
||||
<i className="fas fa-globe-americas" />
|
||||
<a href="/">{window.location.host}</a>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -125,7 +125,7 @@ export class Login extends React.Component {
autoComplete="new-password"
/>
<button className="lw-btn" type="submit">
<i className="fa fa-sign-in" />
<i className="fas fa-sign-in-alt" />
</button>
</form>
</div>

@@ -48,7 +48,7 @@ export const MainActions = ({
<Dropdown dropup className="feb-actions" id="fe-action-toggle">
<Dropdown.Toggle noCaret className="feba-toggle">
<span>
<i className="fa fa-plus" />
<i className="fas fa-plus" />
</span>
</Dropdown.Toggle>
<Dropdown.Menu>
@@ -63,7 +63,7 @@ export const MainActions = ({
/>
<label htmlFor="file-input">
{" "}
<i className="fa fa-cloud-upload" />{" "}
<i className="fas fa-cloud-upload-alt" />{" "}
</label>
</a>
</OverlayTrigger>
@@ -78,7 +78,7 @@ export const MainActions = ({
showMakeBucketModal()
}}
>
<i className="fa fa-hdd-o" />
<i className="far fa-hdd" />
</a>
</OverlayTrigger>
)}

@@ -20,7 +20,6 @@ import ClickOutHandler from "react-onclickout"
|
||||
import { connect } from "react-redux"
|
||||
|
||||
import logo from "../../img/logo.svg"
|
||||
import Dropdown from "react-bootstrap/lib/Dropdown"
|
||||
import BucketSearch from "../buckets/BucketSearch"
|
||||
import BucketList from "../buckets/BucketList"
|
||||
import Host from "./Host"
|
||||
@@ -28,8 +27,14 @@ import * as actionsCommon from "./actions"
|
||||
import web from "../web"
|
||||
|
||||
export const SideBar = ({ sidebarOpen, clickOutside }) => {
|
||||
const onClickOut = e => {
|
||||
if (e.target.classList.contains("feh-trigger")) {
|
||||
return
|
||||
}
|
||||
clickOutside()
|
||||
}
|
||||
return (
|
||||
<ClickOutHandler onClickOut={clickOutside}>
|
||||
<ClickOutHandler onClickOut={onClickOut}>
|
||||
<div
|
||||
className={classNames({
|
||||
"fe-sidebar": true,
|
||||
@@ -62,4 +67,7 @@ const mapDispatchToProps = dispatch => {
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(mapStateToProps, mapDispatchToProps)(SideBar)
|
||||
export default connect(
|
||||
mapStateToProps,
|
||||
mapDispatchToProps
|
||||
)(SideBar)
|
||||
|
||||
@@ -17,21 +17,38 @@
import React from "react"
import { shallow, mount } from "enzyme"
import { ChangePasswordModal } from "../ChangePasswordModal"
import jwtDecode from "jwt-decode"

jest.mock("jwt-decode")

jwtDecode.mockImplementation(() => ({ sub: "minio" }))

jest.mock("../../web", () => ({
GetAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "test1", secretKey: "test2" })
}),
GenerateAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
}),
SetAuth: jest.fn(({ accessKey, secretKey }) => {
if (accessKey == "test3" && secretKey == "test4") {
return Promise.resolve({})
} else {
return Promise.reject({ message: "Error" })
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (
currentAccessKey == "minio" &&
currentSecretKey == "minio123" &&
newAccessKey == "test" &&
newSecretKey == "test1234"
) {
return Promise.resolve({})
} else {
return Promise.reject({
message: "Error"
})
}
}
})
),
GetToken: jest.fn(() => "")
}))

jest.mock("../../utils", () => ({
getRandomAccessKey: () => "raccesskey",
getRandomSecretKey: () => "rsecretkey"
}))

describe("ChangePasswordModal", () => {
@@ -40,57 +57,112 @@ describe("ChangePasswordModal", () => {
memory: "test",
platform: "test",
runtime: "test",
info: { isEnvCreds: false }
info: { isEnvCreds: false },
userInfo: { isIAMUser: false }
}

it("should render without crashing", () => {
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
})

it("should get the keys when its rendered", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
setImmediate(() => {
expect(wrapper.state("accessKey")).toBe("test1")
expect(wrapper.state("secretKey")).toBe("test2")
})
it("should not allow changing password when isWorm is true", () => {
const newServerInfo = { ...serverInfo, info: { isWorm: true } }
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})

it("should show readonly keys when isEnvCreds is true", () => {
const newServerInfo = { ...serverInfo, info: { isEnvCreds: true } }
it("should not allow changing password when isEnvCreds is true and not IAM user", () => {
const newServerInfo = {
...serverInfo,
info: { isEnvCreds: true },
userInfo: { isIAMUser: false }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(wrapper.state("accessKey")).toBe("xxxxxxxxx")
expect(wrapper.state("secretKey")).toBe("xxxxxxxxx")
expect(wrapper.find("#accessKey").prop("readonly")).toBeTruthy()
expect(wrapper.find("#secretKey").prop("readonly")).toBeTruthy()
expect(wrapper.find("#generate-keys").hasClass("hidden")).toBeTruthy()
expect(wrapper.find("#update-keys").hasClass("hidden")).toBeTruthy()
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})

it("should generate accessKey and secretKey when Generate button is clicked", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
wrapper.find("#generate-keys").simulate("click")
setImmediate(() => {
expect(wrapper.state("accessKey")).toBe("gen1")
expect(wrapper.state("secretKey")).toBe("gen2")
expect(wrapper.state("newAccessKey")).toBe("raccesskey")
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
})
})

it("should not generate accessKey for IAM User", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isIAMUser: true }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
wrapper.find("#generate-keys").simulate("click")
setImmediate(() => {
expect(wrapper.state("newAccessKey")).toBe("minio")
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
})
})

it("should not show new accessKey field for IAM User", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isIAMUser: true }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(wrapper.find("#newAccesskey").exists()).toBeFalsy()
})

it("should disable Update button for invalid accessKey or secretKey", () => {
const showAlert = jest.fn()
const wrapper = shallow(
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
)
wrapper
.find("#currentAccessKey")
.simulate("change", { target: { value: "minio" } })
wrapper
.find("#currentSecretKey")
.simulate("change", { target: { value: "minio123" } })
wrapper.find("#newAccessKey").simulate("change", { target: { value: "t" } })
wrapper
.find("#newSecretKey")
.simulate("change", { target: { value: "t1" } })
expect(wrapper.find("#update-keys").prop("disabled")).toBeTruthy()
})

it("should update accessKey and secretKey when Update button is clicked", () => {
const showAlert = jest.fn()
const wrapper = shallow(
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
)
wrapper
.find("#accessKey")
.simulate("change", { target: { value: "test3" } })
.find("#currentAccessKey")
.simulate("change", { target: { value: "minio" } })
wrapper
.find("#secretKey")
.simulate("change", { target: { value: "test4" } })
.find("#currentSecretKey")
.simulate("change", { target: { value: "minio123" } })
wrapper
.find("#newAccessKey")
.simulate("change", { target: { value: "test" } })
wrapper
.find("#newSecretKey")
.simulate("change", { target: { value: "test1234" } })
expect(wrapper.find("#update-keys").prop("disabled")).toBeFalsy()
wrapper.find("#update-keys").simulate("click")
setImmediate(() => {
expect(showAlert).toHaveBeenCalledWith({
type: "success",
message: "Changed credentials"
message: "Credentials updated successfully."
})
})
})

@@ -35,7 +35,20 @@ describe("SideBar", () => {
|
||||
it("should call clickOutside when the user clicks outside the sidebar", () => {
|
||||
const clickOutside = jest.fn()
|
||||
const wrapper = shallow(<SideBar clickOutside={clickOutside} />)
|
||||
wrapper.simulate("clickOut", { preventDefault: jest.fn() })
|
||||
wrapper.simulate("clickOut", {
|
||||
preventDefault: jest.fn(),
|
||||
target: { classList: { contains: jest.fn(() => false) } }
|
||||
})
|
||||
expect(clickOutside).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should not call clickOutside when user clicks on sidebar toggle", () => {
|
||||
const clickOutside = jest.fn()
|
||||
const wrapper = shallow(<SideBar clickOutside={clickOutside} />)
|
||||
wrapper.simulate("clickOut", {
|
||||
preventDefault: jest.fn(),
|
||||
target: { classList: { contains: jest.fn(() => true) } }
|
||||
})
|
||||
expect(clickOutside).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -34,7 +34,7 @@ export const fetchStorageInfo = () => {
return web.StorageInfo().then(res => {
const storageInfo = {
total: res.storageInfo.Total,
used: res.storageInfo.Used
used: res.storageInfo.Used
}
dispatch(setStorageInfo(storageInfo))
})
@@ -54,7 +54,8 @@ export const fetchServerInfo = () => {
memory: res.MinioMemory,
platform: res.MinioPlatform,
runtime: res.MinioRuntime,
info: res.MinioGlobalInfo
info: res.MinioGlobalInfo,
userInfo: res.MinioUserInfo
}
dispatch(setServerInfo(serverInfo))
})

@@ -29,7 +29,7 @@ let BrowserUpdate = ({latestUiVersion}) => {
|
||||
<a href="">
|
||||
<OverlayTrigger placement="left" overlay={ <Tooltip id="tt-version-update">
|
||||
New update available. Click to refresh.
|
||||
</Tooltip> }> <i className="fa fa-refresh"></i> </OverlayTrigger>
|
||||
</Tooltip> }> <i className="fas fa-sync"></i> </OverlayTrigger>
|
||||
</a>
|
||||
</li>
|
||||
)

@@ -28,3 +28,13 @@ export const NONE = "none"
export const SHARE_OBJECT_EXPIRY_DAYS = 5
export const SHARE_OBJECT_EXPIRY_HOURS = 0
export const SHARE_OBJECT_EXPIRY_MINUTES = 0

export const ACCESS_KEY_MIN_LENGTH = 3
export const SECRET_KEY_MIN_LENGTH = 8

export const SORT_BY_NAME = "name"
export const SORT_BY_SIZE = "size"
export const SORT_BY_LAST_MODIFIED = "last-modified"

export const SORT_ORDER_ASC = "asc"
export const SORT_ORDER_DESC = "desc"

@@ -23,7 +23,7 @@ export const DeleteObjectConfirmModal = ({
}) => (
  <ConfirmModal
    show={true}
-   icon="fa fa-exclamation-triangle mci-red"
+   icon="fas fa-exclamation-triangle mci-red"
    text="Are you sure you want to delete?"
    sub="This cannot be undone!"
    okText="Delete"

@@ -67,14 +67,14 @@ export class ObjectActions extends React.Component {
        className="fiad-action"
        onClick={this.shareObject.bind(this)}
      >
-       <i className="fa fa-share-alt" />
+       <i className="fas fa-share-alt" />
      </a>
      <a
        href=""
        className="fiad-action"
        onClick={this.showDeleteConfirmModal.bind(this)}
      >
-       <i className="fa fa-trash" />
+       <i className="fas fa-trash-alt" />
      </a>
    </Dropdown.Menu>
    {(showShareObjectModal && shareObjectName === object.name) &&

@@ -51,7 +51,7 @@ export const ObjectItem = ({
    </div>
    <div className="fesl-item fesl-item-name">
      <a
-       href="#"
+       href={getDataType(name, contentType) === "folder" ? name : "#"}
        onClick={e => {
          e.preventDefault()
          if (onClick) {

@@ -59,7 +59,7 @@ export class ObjectsBulkActions extends React.Component {
      }
    >
      <span className="la-label">
-       <i className="fa fa-check-circle" /> {checkedObjects.length}
+       <i className="fas fa-check-circle" /> {checkedObjects.length}
        {checkedObjects.length === 1 ? " Object " : " Objects "}
        selected
      </span>
@@ -81,7 +81,7 @@ export class ObjectsBulkActions extends React.Component {
      </button>
    </span>
    <i
-     className="la-close fa fa-times"
+     className="la-close fas fa-times"
      id="close-bulk-actions"
      onClick={clearChecked}
    />

@@ -18,11 +18,19 @@ import React from "react"
import classNames from "classnames"
import { connect } from "react-redux"
import * as actionsObjects from "./actions"
import {
  SORT_BY_NAME,
  SORT_BY_SIZE,
  SORT_BY_LAST_MODIFIED,
  SORT_ORDER_DESC,
  SORT_ORDER_ASC
} from "../constants"

export const ObjectsHeader = ({
- sortNameOrder,
- sortSizeOrder,
- sortLastModifiedOrder,
+ sortedByName,
+ sortedBySize,
+ sortedByLastModified,
+ sortOrder,
  sortObjects
}) => (
  <div className="feb-container">
@@ -31,48 +39,54 @@ export const ObjectsHeader = ({
    <div
      className="fesl-item fesl-item-name"
      id="sort-by-name"
-     onClick={() => sortObjects("name")}
+     onClick={() => sortObjects(SORT_BY_NAME)}
      data-sort="name"
    >
      Name
      <i
        className={classNames({
          "fesli-sort": true,
-         fa: true,
-         "fa-sort-alpha-desc": sortNameOrder,
-         "fa-sort-alpha-asc": !sortNameOrder
+         "fesli-sort--active": sortedByName,
+         fas: true,
+         "fa-sort-alpha-down-alt": sortedByName && sortOrder === SORT_ORDER_DESC,
+         "fa-sort-alpha-down": sortedByName && sortOrder === SORT_ORDER_ASC
        })}
      />
    </div>
    <div
      className="fesl-item fesl-item-size"
      id="sort-by-size"
-     onClick={() => sortObjects("size")}
+     onClick={() => sortObjects(SORT_BY_SIZE)}
      data-sort="size"
    >
      Size
      <i
        className={classNames({
          "fesli-sort": true,
-         fa: true,
-         "fa-sort-amount-desc": sortSizeOrder,
-         "fa-sort-amount-asc": !sortSizeOrder
+         "fesli-sort--active": sortedBySize,
+         fas: true,
+         "fa-sort-amount-down":
+           sortedBySize && sortOrder === SORT_ORDER_DESC,
+         "fa-sort-amount-down-alt": sortedBySize && sortOrder === SORT_ORDER_ASC
        })}
      />
    </div>
    <div
      className="fesl-item fesl-item-modified"
      id="sort-by-last-modified"
-     onClick={() => sortObjects("last-modified")}
+     onClick={() => sortObjects(SORT_BY_LAST_MODIFIED)}
      data-sort="last-modified"
    >
      Last Modified
      <i
        className={classNames({
          "fesli-sort": true,
-         fa: true,
-         "fa-sort-numeric-desc": sortLastModifiedOrder,
-         "fa-sort-numeric-asc": !sortLastModifiedOrder
+         "fesli-sort--active": sortedByLastModified,
+         fas: true,
+         "fa-sort-numeric-down-alt":
+           sortedByLastModified && sortOrder === SORT_ORDER_DESC,
+         "fa-sort-numeric-down":
+           sortedByLastModified && sortOrder === SORT_ORDER_ASC
        })}
      />
    </div>
@@ -83,10 +97,10 @@ export const ObjectsHeader = ({

const mapStateToProps = state => {
  return {
-   sortNameOrder: state.objects.sortBy == "name" && state.objects.sortOrder,
-   sortSizeOrder: state.objects.sortBy == "size" && state.objects.sortOrder,
-   sortLastModifiedOrder:
-     state.objects.sortBy == "last-modified" && state.objects.sortOrder
+   sortedByName: state.objects.sortBy == SORT_BY_NAME,
+   sortedBySize: state.objects.sortBy == SORT_BY_SIZE,
+   sortedByLastModified: state.objects.sortBy == SORT_BY_LAST_MODIFIED,
+   sortOrder: state.objects.sortOrder
  }
}

@@ -96,4 +110,7 @@ const mapDispatchToProps = dispatch => {
  }
}

-export default connect(mapStateToProps, mapDispatchToProps)(ObjectsHeader)
+export default connect(
+  mapStateToProps,
+  mapDispatchToProps
+)(ObjectsHeader)
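
The header now derives each icon from two pieces of state, the active column and a shared sort order, instead of one boolean per column. A small sketch of how classNames resolves such a map (standalone, with made-up state values):

import classNames from "classnames"

// Hypothetical state: sorted by name, ascending.
const sortedByName = true
const sortOrder = "asc"

// classNames keeps only the keys whose value is truthy, so exactly one
// direction class survives for the active column.
const iconClass = classNames({
  "fesli-sort": true,
  "fesli-sort--active": sortedByName,
  fas: true,
  "fa-sort-alpha-down-alt": sortedByName && sortOrder === "desc",
  "fa-sort-alpha-down": sortedByName && sortOrder === "asc"
})
// iconClass === "fesli-sort fesli-sort--active fas fa-sort-alpha-down"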

@@ -15,32 +15,52 @@
 */

import React from "react"
import classNames from "classnames"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import * as actionsObjects from "./actions"
import ObjectsList from "./ObjectsList"

export class ObjectsListContainer extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      page: 1
    }
    this.loadNextPage = this.loadNextPage.bind(this)
  }
  componentWillReceiveProps(nextProps) {
    if (
      nextProps.currentBucket !== this.props.currentBucket ||
      nextProps.currentPrefix !== this.props.currentPrefix ||
      nextProps.sortBy !== this.props.sortBy ||
      nextProps.sortOrder !== this.props.sortOrder
    ) {
      this.setState({
        page: 1
      })
    }
  }
  loadNextPage() {
    this.setState(state => {
      return { page: state.page + 1 }
    })
  }
  render() {
-   const { objects, isTruncated, currentBucket, loadObjects } = this.props
+   const { objects, listLoading } = this.props

    const visibleObjects = objects.slice(0, this.state.page * 100)

    return (
      <div className="feb-container">
        <div style={{ position: "relative" }}>
          <InfiniteScroll
            pageStart={0}
-           loadMore={() => loadObjects(true)}
-           hasMore={isTruncated}
+           loadMore={this.loadNextPage}
+           hasMore={objects.length > visibleObjects.length}
            useWindow={true}
            initialLoad={false}
          >
-           <ObjectsList objects={objects} />
+           <ObjectsList objects={visibleObjects} />
          </InfiniteScroll>
-         <div
-           className="text-center"
-           style={{ display: isTruncated && currentBucket ? "block" : "none" }}
-         >
-           <span>Loading...</span>
-         </div>
+         {listLoading && <div className="loading" />}
        </div>
      )
    }
@@ -51,16 +71,10 @@ const mapStateToProps = state => {
    currentBucket: state.buckets.currentBucket,
    currentPrefix: state.objects.currentPrefix,
    objects: state.objects.list,
-   isTruncated: state.objects.isTruncated
+   sortBy: state.objects.sortBy,
+   sortOrder: state.objects.sortOrder,
+   listLoading: state.objects.listLoading
  }
}

-const mapDispatchToProps = dispatch => {
-  return {
-    loadObjects: append => dispatch(actionsObjects.fetchObjects(append))
-  }
-}
-
-export default connect(mapStateToProps, mapDispatchToProps)(
-  ObjectsListContainer
-)
+export default connect(mapStateToProps)(ObjectsListContainer)
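
With this change the list no longer pages through server-side markers; the full listing is already in the store, and InfiniteScroll merely grows a client-side window of 100 items at a time. A minimal sketch of that windowing idea outside React (array contents are made up):

// Client-side paging over an already-loaded list: grow the visible window
// by a fixed page size each time the scroller asks for more.
const PAGE_SIZE = 100
let page = 1
const objects = Array.from({ length: 250 }, (_, i) => ({ name: `obj-${i}` }))

const visible = () => objects.slice(0, page * PAGE_SIZE)
const hasMore = () => objects.length > visible().length

while (hasMore()) {
  page += 1 // what loadNextPage() does via setState
}
console.log(visible().length) // 250 — everything is visible once page reaches 3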

@@ -1,5 +1,5 @@
/*
- * MinIO Cloud Storage (C) 2016 MinIO, Inc.
+ * MinIO Cloud Storage (C) 2016, 2018, 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,46 +16,146 @@

import React from "react"
import { connect } from "react-redux"
import ClickOutHandler from "react-onclickout"
import { OverlayTrigger, Tooltip } from "react-bootstrap"
import { getCurrentBucket } from "../buckets/selectors"
import * as actionsObjects from "./actions"
import * as actionsBuckets from "../buckets/actions"

- export const Path = ({ currentBucket, currentPrefix, selectPrefix }) => {
-   const onPrefixClick = (e, prefix) => {
-     e.preventDefault()
-     selectPrefix(prefix)
export class Path extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      isEditing: false,
      path: ""
    }
  }
-   let dirPath = []
-   let path = ""
-   if (currentPrefix) {
-     path = currentPrefix.split("/").map((dir, i) => {
-       if (dir) {
-         dirPath.push(dir)
-         let dirPath_ = dirPath.join("/") + "/"
-         return (
-           <span key={i}>
-             <a href="" onClick={e => onPrefixClick(e, dirPath_)}>
-               {dir}
-             </a>
-           </span>
-         )
-       }
  stopEditing() {
    this.setState({
      isEditing: false
    })
  }

-   return (
-     <h2>
-       <span className="main">
-         <a onClick={e => onPrefixClick(e, "")} href="">
-           {currentBucket}
-         </a>
-       </span>
-       {path}
-     </h2>
-   )
  onPrefixClick(e, prefix) {
    e.preventDefault()
    const { selectPrefix } = this.props
    selectPrefix(prefix)
  }
  onEditClick(e) {
    e.preventDefault()
    const { currentBucket, currentPrefix } = this.props
    this.setState(
      {
        isEditing: true,
        path: `${currentBucket}/${currentPrefix}`
      },
      () => {
        // focus on input and move cursor to the end
        this.pathInput.focus()
        this.pathInput.setSelectionRange(
          this.state.path.length,
          this.state.path.length
        )
      }
    )
  }
  onKeyDown(e) {
    // When Esc key is pressed
    if (e.keyCode === 27) {
      this.stopEditing()
    }
  }
  onInputClickOut() {
    this.stopEditing()
  }
  bucketExists(bucketName) {
    const { buckets } = this.props
    return buckets.includes(bucketName)
  }
  async onSubmit(e) {
    e.preventDefault()
    const { makeBucket, selectBucket } = this.props
    // all paths need to end in slash to display contents properly
    let path = this.state.path
    if (!path.endsWith("/")) {
      path += "/"
    }
    const splittedPath = path.split("/")
    if (splittedPath.length > 0) {
      // prevent bucket name from being empty
      if (splittedPath[0]) {
        const bucketName = splittedPath[0]
        const prefix = splittedPath.slice(1).join("/")
        if (!this.bucketExists(bucketName)) {
          await makeBucket(bucketName)
        }
        // check updated buckets and don't proceed on invalid inputs
        if (this.bucketExists(bucketName)) {
          // then select bucket with prefix
          selectBucket(bucketName, prefix)
        }
        this.stopEditing()
      }
    }
  }
  render() {
    const pathTooltip = <Tooltip id="tt-path">Choose or create new path</Tooltip>
    const { currentBucket, currentPrefix } = this.props
    let dirPath = []
    let path = ""
    if (currentPrefix) {
      path = currentPrefix.split("/").map((dir, i) => {
        if (dir) {
          dirPath.push(dir)
          let dirPath_ = dirPath.join("/") + "/"
          return (
            <span key={i}>
              <a href="" onClick={e => this.onPrefixClick(e, dirPath_)}>
                {dir}
              </a>
            </span>
          )
        }
      })
    }
    return (
      <h2>
        {this.state.isEditing ? (
          <ClickOutHandler onClickOut={() => this.onInputClickOut()}>
            <form onSubmit={e => this.onSubmit(e)}>
              <input
                className="form-control form-control--path"
                type="text"
                placeholder="Choose or create new path"
                ref={node => (this.pathInput = node)}
                onKeyDown={e => this.onKeyDown(e)}
                value={this.state.path}
                onChange={e => this.setState({ path: e.target.value })}
              />
            </form>
          </ClickOutHandler>
        ) : (
          <React.Fragment>
            <span className="main">
              <a href="" onClick={e => this.onPrefixClick(e, "")}>
                {currentBucket}
              </a>
            </span>
            {path}
            <OverlayTrigger placement="bottom" overlay={pathTooltip}>
              <a href="" onClick={e => this.onEditClick(e)} className="fe-edit">
                <i className="fas fa-folder-plus" />
              </a>
            </OverlayTrigger>
          </React.Fragment>
        )}
      </h2>
    )
  }
}

const mapStateToProps = state => {
  return {
    buckets: state.buckets.list,
    currentBucket: getCurrentBucket(state),
    currentPrefix: state.objects.currentPrefix
  }
@@ -63,8 +163,14 @@ const mapStateToProps = state => {

const mapDispatchToProps = dispatch => {
  return {
    makeBucket: bucket => dispatch(actionsBuckets.makeBucket(bucket)),
    selectBucket: (bucket, prefix) =>
      dispatch(actionsBuckets.selectBucket(bucket, prefix)),
    selectPrefix: prefix => dispatch(actionsObjects.selectPrefix(prefix))
  }
}

-export default connect(mapStateToProps, mapDispatchToProps)(Path)
+export default connect(
+  mapStateToProps,
+  mapDispatchToProps
+)(Path)
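
onSubmit treats the first path segment as the bucket and everything after it as the prefix, creating the bucket when it does not yet exist. A standalone sketch of that parsing rule (the parsePath name is illustrative, not part of the codebase):

// Split "bucket/some/prefix/" into its bucket and prefix parts, the way the
// editable path field interprets user input.
const parsePath = input => {
  let path = input
  if (!path.endsWith("/")) {
    path += "/" // contents only display correctly for slash-terminated paths
  }
  const segments = path.split("/")
  const bucketName = segments[0] // empty string means "no bucket", reject it
  const prefix = segments.slice(1).join("/")
  return bucketName ? { bucketName, prefix } : null
}

console.log(parsePath("test3/dir1"))   // { bucketName: "test3", prefix: "dir1/" }
console.log(parsePath("//dir1/dir2/")) // null: a leading slash leaves the bucket empty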

@@ -17,6 +17,7 @@
import React from "react"
import { shallow } from "enzyme"
import { ObjectsHeader } from "../ObjectsHeader"
import { SORT_ORDER_ASC, SORT_ORDER_DESC } from "../../constants"

describe("ObjectsHeader", () => {
  it("should render without crashing", () => {
@@ -24,47 +25,87 @@ describe("ObjectsHeader", () => {
    shallow(<ObjectsHeader sortObjects={sortObjects} />)
  })

- it("should render columns with asc classes by default", () => {
+ it("should render the name column with asc class when objects are sorted by name asc", () => {
    const sortObjects = jest.fn()
-   const wrapper = shallow(<ObjectsHeader sortObjects={sortObjects} />)
+   const wrapper = shallow(
+     <ObjectsHeader
+       sortObjects={sortObjects}
+       sortedByName={true}
+       sortOrder={SORT_ORDER_ASC}
+     />
+   )
    expect(
-     wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-asc")
-   ).toBeTruthy()
-   expect(
-     wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-asc")
-   ).toBeTruthy()
-   expect(
-     wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-asc")
+     wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-down")
    ).toBeTruthy()
  })

- it("should render name column with desc class when objects are sorted by name", () => {
+ it("should render the name column with desc class when objects are sorted by name desc", () => {
    const sortObjects = jest.fn()
    const wrapper = shallow(
-     <ObjectsHeader sortObjects={sortObjects} sortNameOrder={true} />
+     <ObjectsHeader
+       sortObjects={sortObjects}
+       sortedByName={true}
+       sortOrder={SORT_ORDER_DESC}
+     />
    )
    expect(
-     wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-desc")
+     wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-down-alt")
    ).toBeTruthy()
  })

- it("should render size column with desc class when objects are sorted by size", () => {
+ it("should render the size column with asc class when objects are sorted by size asc", () => {
    const sortObjects = jest.fn()
    const wrapper = shallow(
-     <ObjectsHeader sortObjects={sortObjects} sortSizeOrder={true} />
+     <ObjectsHeader
+       sortObjects={sortObjects}
+       sortedBySize={true}
+       sortOrder={SORT_ORDER_ASC}
+     />
    )
    expect(
-     wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-desc")
+     wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-down-alt")
    ).toBeTruthy()
  })

- it("should render last modified column with desc class when objects are sorted by last modified", () => {
+ it("should render the size column with desc class when objects are sorted by size desc", () => {
    const sortObjects = jest.fn()
    const wrapper = shallow(
-     <ObjectsHeader sortObjects={sortObjects} sortLastModifiedOrder={true} />
+     <ObjectsHeader
+       sortObjects={sortObjects}
+       sortedBySize={true}
+       sortOrder={SORT_ORDER_DESC}
+     />
    )
    expect(
-     wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-desc")
+     wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-down")
    ).toBeTruthy()
  })

  it("should render the date column with asc class when objects are sorted by date asc", () => {
    const sortObjects = jest.fn()
    const wrapper = shallow(
      <ObjectsHeader
        sortObjects={sortObjects}
        sortedByLastModified={true}
        sortOrder={SORT_ORDER_ASC}
      />
    )
    expect(
      wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-down")
    ).toBeTruthy()
  })

  it("should render the date column with desc class when objects are sorted by date desc", () => {
    const sortObjects = jest.fn()
    const wrapper = shallow(
      <ObjectsHeader
        sortObjects={sortObjects}
        sortedByLastModified={true}
        sortOrder={SORT_ORDER_DESC}
      />
    )
    expect(
      wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-down-alt")
    ).toBeTruthy()
  })

@@ -20,14 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"

describe("ObjectsList", () => {
  it("should render without crashing", () => {
-   shallow(<ObjectsListContainer loadObjects={jest.fn()} />)
+   shallow(<ObjectsListContainer objects={[]} />)
  })

  it("should render ObjectsList with objects", () => {
    const wrapper = shallow(
      <ObjectsListContainer
        objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
-       loadObjects={jest.fn()}
      />
    )
    expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -37,10 +36,14 @@ describe("ObjectsList", () => {
    ])
  })

- it("should show the loading indicator at the bottom if there are more elements to display", () => {
+ it("should show the loading indicator when the objects are being loaded", () => {
    const wrapper = shallow(
-     <ObjectsListContainer currentBucket="test1" isTruncated={true} />
+     <ObjectsListContainer
+       currentBucket="test1"
+       objects={[]}
+       listLoading={true}
+     />
    )
-   expect(wrapper.find(".text-center").prop("style")).toHaveProperty("display", "block")
+   expect(wrapper.find(".loading").exists()).toBeTruthy()
  })
})

@@ -15,7 +15,7 @@
 */

import React from "react"
-import { shallow } from "enzyme"
+import { shallow, mount } from "enzyme"
import { Path } from "../Path"

describe("Path", () => {
@@ -26,7 +26,12 @@ describe("Path", () => {
  it("should render only bucket if there is no prefix", () => {
    const wrapper = shallow(<Path currentBucket={"test1"} currentPrefix={""} />)
    expect(wrapper.find("span").length).toBe(1)
-   expect(wrapper.text()).toBe("test1")
+   expect(
+     wrapper
+       .find("span")
+       .at(0)
+       .text()
+   ).toBe("test1")
  })

  it("should render bucket and prefix", () => {
@@ -69,4 +74,70 @@ describe("Path", () => {
      .simulate("click", { preventDefault: jest.fn() })
    expect(selectPrefix).toHaveBeenCalledWith("a/b/")
  })

  it("should switch to input mode when edit icon is clicked", () => {
    const wrapper = mount(<Path currentBucket={"test1"} currentPrefix={""} />)
    wrapper.find(".fe-edit").simulate("click", { preventDefault: jest.fn() })
    expect(wrapper.find(".form-control--path").exists()).toBeTruthy()
  })

  it("should navigate to prefix when user types path for existing bucket", () => {
    const selectBucket = jest.fn()
    const buckets = ["test1", "test2"]
    const wrapper = mount(
      <Path
        buckets={buckets}
        currentBucket={"test1"}
        currentPrefix={""}
        selectBucket={selectBucket}
      />
    )
    wrapper.setState({
      isEditing: true,
      path: "test2/dir1/"
    })
    wrapper.find("form").simulate("submit", { preventDefault: jest.fn() })
    expect(selectBucket).toHaveBeenCalledWith("test2", "dir1/")
  })

  it("should create a new bucket if bucket typed in path doesn't exist", () => {
    const makeBucket = jest.fn()
    const buckets = ["test1", "test2"]
    const wrapper = mount(
      <Path
        buckets={buckets}
        currentBucket={"test1"}
        currentPrefix={""}
        makeBucket={makeBucket}
      />
    )
    wrapper.setState({
      isEditing: true,
      path: "test3/dir1/"
    })
    wrapper.find("form").simulate("submit", { preventDefault: jest.fn() })
    expect(makeBucket).toHaveBeenCalledWith("test3")
  })

  it("should not make or select bucket if path doesn't point to bucket", () => {
    const makeBucket = jest.fn()
    const selectBucket = jest.fn()
    const buckets = ["test1", "test2"]
    const wrapper = mount(
      <Path
        buckets={buckets}
        currentBucket={"test1"}
        currentPrefix={""}
        makeBucket={makeBucket}
        selectBucket={selectBucket}
      />
    )
    wrapper.setState({
      isEditing: true,
      path: "//dir1/dir2/"
    })
    wrapper.find("form").simulate("submit", { preventDefault: jest.fn() })
    expect(makeBucket).not.toHaveBeenCalled()
    expect(selectBucket).not.toHaveBeenCalled()
  })
})

@@ -18,7 +18,13 @@ import configureStore from "redux-mock-store"
import thunk from "redux-thunk"
import * as actionsObjects from "../actions"
import * as alertActions from "../../alert/actions"
-import { minioBrowserPrefix } from "../../constants"
+import {
+  minioBrowserPrefix,
+  SORT_BY_NAME,
+  SORT_ORDER_ASC,
+  SORT_BY_LAST_MODIFIED,
+  SORT_ORDER_DESC
+} from "../../constants"
import history from "../../history"

jest.mock("../../web", () => ({
@@ -37,8 +43,6 @@ jest.mock("../../web", () => ({
  } else {
    return Promise.resolve({
      objects: [{ name: "test1" }, { name: "test2" }],
-     istruncated: false,
-     nextmarker: "test2",
      writable: false
    })
  }

@@ -77,17 +81,11 @@ describe("Objects actions", () => {
    const expectedActions = [
      {
        type: "objects/SET_LIST",
-       objects: [{ name: "test1" }, { name: "test2" }],
-       isTruncated: false,
-       marker: "test2"
+       objects: [{ name: "test1" }, { name: "test2" }]
      }
    ]
    store.dispatch(
-     actionsObjects.setList(
-       [{ name: "test1" }, { name: "test2" }],
-       "test2",
-       false
-     )
+     actionsObjects.setList([{ name: "test1" }, { name: "test2" }])
    )
    const actions = store.getActions()
    expect(actions).toEqual(expectedActions)
@@ -98,10 +96,10 @@ describe("Objects actions", () => {
    const expectedActions = [
      {
        type: "objects/SET_SORT_BY",
-       sortBy: "name"
+       sortBy: SORT_BY_NAME
      }
    ]
-   store.dispatch(actionsObjects.setSortBy("name"))
+   store.dispatch(actionsObjects.setSortBy(SORT_BY_NAME))
    const actions = store.getActions()
    expect(actions).toEqual(expectedActions)
  })
@@ -111,10 +109,10 @@ describe("Objects actions", () => {
    const expectedActions = [
      {
        type: "objects/SET_SORT_ORDER",
-       sortOrder: true
+       sortOrder: SORT_ORDER_ASC
      }
    ]
-   store.dispatch(actionsObjects.setSortOrder(true))
+   store.dispatch(actionsObjects.setSortOrder(SORT_ORDER_ASC))
    const actions = store.getActions()
    expect(actions).toEqual(expectedActions)
  })
@@ -126,23 +124,26 @@ describe("Objects actions", () => {
    })
    const expectedActions = [
      {
-       type: "objects/SET_LIST",
-       objects: [{ name: "test1" }, { name: "test2" }],
-       marker: "test2",
-       isTruncated: false
+       type: "objects/RESET_LIST"
      },
      { listLoading: true, type: "objects/SET_LIST_LOADING" },
      {
        type: "objects/SET_SORT_BY",
-       sortBy: ""
+       sortBy: SORT_BY_LAST_MODIFIED
      },
      {
        type: "objects/SET_SORT_ORDER",
-       sortOrder: false
+       sortOrder: SORT_ORDER_DESC
      },
      {
        type: "objects/SET_LIST",
        objects: [{ name: "test2" }, { name: "test1" }]
      },
      {
        type: "objects/SET_PREFIX_WRITABLE",
        prefixWritable: false
-     }
+     },
+     { listLoading: false, type: "objects/SET_LIST_LOADING" }
    ]
    return store.dispatch(actionsObjects.fetchObjects()).then(() => {
      const actions = store.getActions()
@@ -150,35 +151,16 @@ describe("Objects actions", () => {
    })
  })

- it("creates objects/APPEND_LIST after fetching more objects", () => {
-   const store = mockStore({
-     buckets: { currentBucket: "bk1" },
-     objects: { currentPrefix: "" }
-   })
-   const expectedActions = [
-     {
-       type: "objects/APPEND_LIST",
-       objects: [{ name: "test1" }, { name: "test2" }],
-       marker: "test2",
-       isTruncated: false
-     },
-     {
-       type: "objects/SET_PREFIX_WRITABLE",
-       prefixWritable: false
-     }
-   ]
-   return store.dispatch(actionsObjects.fetchObjects(true)).then(() => {
-     const actions = store.getActions()
-     expect(actions).toEqual(expectedActions)
-   })
- })
-
  it("creates objects/RESET_LIST after failing to fetch the objects from bucket with ListObjects denied for LoggedIn users", () => {
    const store = mockStore({
      buckets: { currentBucket: "test-deny" },
      objects: { currentPrefix: "" }
    })
    const expectedActions = [
      {
        type: "objects/RESET_LIST"
      },
      { listLoading: true, type: "objects/SET_LIST_LOADING" },
      {
        type: "alert/SET",
        alert: {
@@ -189,8 +171,9 @@ describe("Objects actions", () => {
        }
      },
      {
-       type: "object/RESET_LIST"
-     }
+       type: "objects/RESET_LIST"
+     },
+     { listLoading: false, type: "objects/SET_LIST_LOADING" }
    ]
    return store.dispatch(actionsObjects.fetchObjects()).then(() => {
      const actions = store.getActions()
@@ -213,28 +196,24 @@ describe("Objects actions", () => {
      objects: {
        list: [],
        sortBy: "",
-       sortOrder: false,
-       isTruncated: false,
-       marker: ""
+       sortOrder: SORT_ORDER_ASC
      }
    })
    const expectedActions = [
      {
        type: "objects/SET_SORT_BY",
-       sortBy: "name"
+       sortBy: SORT_BY_NAME
      },
      {
        type: "objects/SET_SORT_ORDER",
-       sortOrder: true
+       sortOrder: SORT_ORDER_ASC
      },
      {
        type: "objects/SET_LIST",
-       objects: [],
-       isTruncated: false,
-       marker: ""
+       objects: []
      }
    ]
-   store.dispatch(actionsObjects.sortObjects("name"))
+   store.dispatch(actionsObjects.sortObjects(SORT_BY_NAME))
    const actions = store.getActions()
    expect(actions).toEqual(expectedActions)
  })
@@ -246,6 +225,10 @@ describe("Objects actions", () => {
    })
    const expectedActions = [
      { type: "objects/SET_CURRENT_PREFIX", prefix: "abc/" },
      {
        type: "objects/RESET_LIST"
      },
      { listLoading: true, type: "objects/SET_LIST_LOADING" },
      { type: "objects/CHECKED_LIST_RESET" }
    ]
    store.dispatch(actionsObjects.selectPrefix("abc/"))

@@ -16,17 +16,17 @@

import reducer from "../reducer"
import * as actions from "../actions"
import { SORT_ORDER_ASC, SORT_BY_NAME } from "../../constants"

describe("objects reducer", () => {
  it("should return the initial state", () => {
    const initialState = reducer(undefined, {})
    expect(initialState).toEqual({
      list: [],
      listLoading: false,
      sortBy: "",
-     sortOrder: false,
+     sortOrder: SORT_ORDER_ASC,
      currentPrefix: "",
-     marker: "",
-     isTruncated: false,
      prefixWritable: false,
      shareObject: {
        show: false,
@@ -40,37 +40,9 @@ describe("objects reducer", () => {
  it("should handle SET_LIST", () => {
    const newState = reducer(undefined, {
      type: actions.SET_LIST,
-     objects: [{ name: "obj1" }, { name: "obj2" }],
-     marker: "obj2",
-     isTruncated: false
+     objects: [{ name: "obj1" }, { name: "obj2" }]
    })
    expect(newState.list).toEqual([{ name: "obj1" }, { name: "obj2" }])
-   expect(newState.marker).toBe("obj2")
-   expect(newState.isTruncated).toBeFalsy()
  })

- it("should handle APPEND_LIST", () => {
-   const newState = reducer(
-     {
-       list: [{ name: "obj1" }, { name: "obj2" }],
-       marker: "obj2",
-       isTruncated: true
-     },
-     {
-       type: actions.APPEND_LIST,
-       objects: [{ name: "obj3" }, { name: "obj4" }],
-       marker: "obj4",
-       isTruncated: false
-     }
-   )
-   expect(newState.list).toEqual([
-     { name: "obj1" },
-     { name: "obj2" },
-     { name: "obj3" },
-     { name: "obj4" }
-   ])
-   expect(newState.marker).toBe("obj4")
-   expect(newState.isTruncated).toBeFalsy()
- })
-
  it("should handle REMOVE", () => {
@@ -98,30 +70,28 @@ describe("objects reducer", () => {
  it("should handle SET_SORT_BY", () => {
    const newState = reducer(undefined, {
      type: actions.SET_SORT_BY,
-     sortBy: "name"
+     sortBy: SORT_BY_NAME
    })
-   expect(newState.sortBy).toEqual("name")
+   expect(newState.sortBy).toEqual(SORT_BY_NAME)
  })

  it("should handle SET_SORT_ORDER", () => {
    const newState = reducer(undefined, {
      type: actions.SET_SORT_ORDER,
-     sortOrder: true
+     sortOrder: SORT_ORDER_ASC
    })
-   expect(newState.sortOrder).toEqual(true)
+   expect(newState.sortOrder).toEqual(SORT_ORDER_ASC)
  })

  it("should handle SET_CURRENT_PREFIX", () => {
    const newState = reducer(
-     { currentPrefix: "test1/", marker: "abc", isTruncated: true },
+     { currentPrefix: "test1/" },
      {
        type: actions.SET_CURRENT_PREFIX,
        prefix: "test2/"
      }
    )
    expect(newState.currentPrefix).toEqual("test2/")
-   expect(newState.marker).toEqual("")
-   expect(newState.isTruncated).toBeFalsy()
  })

  it("should handle SET_PREFIX_WRITABLE", () => {

@@ -16,15 +16,26 @@

import web from "../web"
import history from "../history"
-import { sortObjectsByName, sortObjectsBySize, sortObjectsByDate } from "../utils"
+import {
+  sortObjectsByName,
+  sortObjectsBySize,
+  sortObjectsByDate
+} from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
-import { minioBrowserPrefix } from "../constants"
+import {
+  minioBrowserPrefix,
+  SORT_BY_NAME,
+  SORT_BY_SIZE,
+  SORT_BY_LAST_MODIFIED,
+  SORT_ORDER_ASC,
+  SORT_ORDER_DESC
+} from "../constants"

export const SET_LIST = "objects/SET_LIST"
-export const RESET_LIST = "object/RESET_LIST"
+export const RESET_LIST = "objects/RESET_LIST"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -35,53 +46,63 @@ export const SET_SHARE_OBJECT = "objects/SET_SHARE_OBJECT"
export const CHECKED_LIST_ADD = "objects/CHECKED_LIST_ADD"
export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"

-export const setList = (objects, marker, isTruncated) => ({
+export const setList = objects => ({
  type: SET_LIST,
-  objects,
-  marker,
-  isTruncated
+  objects
})

export const resetList = () => ({
  type: RESET_LIST
})

-export const appendList = (objects, marker, isTruncated) => ({
-  type: APPEND_LIST,
-  objects,
-  marker,
-  isTruncated
+export const setListLoading = listLoading => ({
+  type: SET_LIST_LOADING,
+  listLoading
})

-export const fetchObjects = append => {
+export const fetchObjects = () => {
  return function(dispatch, getState) {
-   const {buckets: {currentBucket}, objects: {currentPrefix, marker}} = getState()
+   dispatch(resetList())
+   const {
+     buckets: { currentBucket },
+     objects: { currentPrefix }
+   } = getState()
    if (currentBucket) {
      dispatch(setListLoading(true))
      return web
        .ListObjects({
          bucketName: currentBucket,
-         prefix: currentPrefix,
-         marker: append ? marker : ""
+         prefix: currentPrefix
        })
        .then(res => {
-         let objects = []
-         if (res.objects) {
-           objects = res.objects.map(object => {
-             return {
-               ...object,
-               name: object.name.replace(currentPrefix, "")
-             }
-           })
+         // we need to check if the bucket name and prefix are the same as
+         // when the request was made before updating the displayed objects
+         if (
+           currentBucket === getCurrentBucket(getState()) &&
+           currentPrefix === getCurrentPrefix(getState())
+         ) {
+           let objects = []
+           if (res.objects) {
+             objects = res.objects.map(object => {
+               return {
+                 ...object,
+                 name: object.name.replace(currentPrefix, "")
+               }
+             })
+           }
+
+           const sortBy = SORT_BY_LAST_MODIFIED
+           const sortOrder = SORT_ORDER_DESC
+           dispatch(setSortBy(sortBy))
+           dispatch(setSortOrder(sortOrder))
+           const sortedList = sortObjectsList(objects, sortBy, sortOrder)
+           dispatch(setList(sortedList))
+
+           dispatch(setPrefixWritable(res.writable))
+           dispatch(setListLoading(false))
          }
-         if (append) {
-           dispatch(appendList(objects, res.nextmarker, res.istruncated))
-         } else {
-           dispatch(setList(objects, res.nextmarker, res.istruncated))
-           dispatch(setSortBy(""))
-           dispatch(setSortOrder(false))
-         }
-         dispatch(setPrefixWritable(res.writable))
        })
        .catch(err => {
          if (web.LoggedIn()) {
@@ -96,6 +117,7 @@ export const fetchObjects = () => {
          } else {
            history.push("/login")
          }
+         dispatch(setListLoading(false))
        })
    }
  }
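
Because fetchObjects now resolves asynchronously while the user may already have navigated elsewhere, the handler re-reads the store and only dispatches results when the bucket and prefix still match the ones the request was issued for. The same guard in isolation (a sketch with a simplified store shape, not the real action):

// Guard against stale responses: capture the location at request time and
// compare it with the current location when the response arrives.
const fetchListing = (dispatch, getState, listObjects) => {
  const { bucket, prefix } = getState()
  return listObjects(bucket, prefix).then(objects => {
    const now = getState()
    if (bucket === now.bucket && prefix === now.prefix) {
      dispatch({ type: "objects/SET_LIST", objects })
    }
    // otherwise the user has moved on; drop the response silently
  })
}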

@@ -103,26 +125,27 @@ export const fetchObjects = () => {

export const sortObjects = sortBy => {
  return function(dispatch, getState) {
-   const {objects} = getState()
-   const sortOrder = objects.sortBy == sortBy ? !objects.sortOrder : true
+   const { objects } = getState()
+   let sortOrder = SORT_ORDER_ASC
+   // Reverse sort order if the list is already sorted on same field
+   if (objects.sortBy === sortBy && objects.sortOrder === SORT_ORDER_ASC) {
+     sortOrder = SORT_ORDER_DESC
+   }
    dispatch(setSortBy(sortBy))
    dispatch(setSortOrder(sortOrder))
-   let list
-   switch (sortBy) {
-     case "name":
-       list = sortObjectsByName(objects.list, sortOrder)
-       break
-     case "size":
-       list = sortObjectsBySize(objects.list, sortOrder)
-       break
-     case "last-modified":
-       list = sortObjectsByDate(objects.list, sortOrder)
-       break
-     default:
-       list = objects.list
-       break
-   }
-   dispatch(setList(list, objects.marker, objects.isTruncated))
+   const sortedList = sortObjectsList(objects.list, sortBy, sortOrder)
+   dispatch(setList(sortedList))
  }
}

const sortObjectsList = (list, sortBy, sortOrder) => {
  switch (sortBy) {
    case SORT_BY_NAME:
      return sortObjectsByName(list, sortOrder)
    case SORT_BY_SIZE:
      return sortObjectsBySize(list, sortOrder)
    case SORT_BY_LAST_MODIFIED:
      return sortObjectsByDate(list, sortOrder)
  }
}
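
The toggle rule is worth spelling out: clicking a new column always starts ascending, and clicking the already-active ascending column flips to descending. A quick trace of that rule (plain values, no Redux):

// Trace of the sort-order toggle used by sortObjects.
const nextOrder = (current, clicked) =>
  current.sortBy === clicked && current.sortOrder === "asc" ? "desc" : "asc"

console.log(nextOrder({ sortBy: "name", sortOrder: "asc" }, "name"))  // "desc"
console.log(nextOrder({ sortBy: "name", sortOrder: "desc" }, "name")) // "asc"
console.log(nextOrder({ sortBy: "name", sortOrder: "desc" }, "size")) // "asc", new column resets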

@@ -208,7 +231,8 @@ export const shareObject = (object, days, hours, minutes) => {
      .PresignedGet({
        host: location.host,
        bucket: currentBucket,
-       object: objectName
+       object: objectName,
+       expiry: expiry
      })
      .then(obj => {
        dispatch(showShareObject(object, obj.url))
@@ -228,7 +252,16 @@ export const shareObject = (object, days, hours, minutes) => {
        )
      })
  } else {
-   dispatch(showShareObject(object, `${location.host}` + '/' + `${currentBucket}` + '/' + encodeURI(objectName)))
+   dispatch(
+     showShareObject(
+       object,
+       `${location.host}` +
+         "/" +
+         `${currentBucket}` +
+         "/" +
+         encodeURI(objectName)
+     )
+   )
    dispatch(
      alertActions.set({
        type: "success",
@@ -321,13 +354,14 @@ export const downloadCheckedObjects = () => {
        }${minioBrowserPrefix}/zip?token=${res.token}`
        downloadZip(requestUrl, req, dispatch)
      })
-     .catch(err => dispatch(
-       alertActions.set({
-         type: "danger",
-         message: err.message
-       })
+     .catch(err =>
+       dispatch(
+         alertActions.set({
+           type: "danger",
+           message: err.message
+         })
+       )
      )
  }
}
@@ -350,7 +384,8 @@ const downloadZip = (url, req, dispatch) => {
    var separator = req.prefix.length > 1 ? "-" : ""

    anchor.href = blobUrl
-   anchor.download = req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
+   anchor.download =
+     req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"

    anchor.click()
    window.URL.revokeObjectURL(blobUrl)

@@ -15,6 +15,7 @@
 */

import * as actionsObjects from "./actions"
import { SORT_ORDER_ASC } from "../constants"

const removeObject = (list, objectToRemove, lookup) => {
  const idx = list.findIndex(object => lookup(object) === objectToRemove)
@@ -27,11 +28,10 @@ const removeObject = (list, objectToRemove, lookup) => {
export default (
  state = {
    list: [],
    listLoading: false,
    sortBy: "",
-   sortOrder: false,
+   sortOrder: SORT_ORDER_ASC,
    currentPrefix: "",
-   marker: "",
-   isTruncated: false,
    prefixWritable: false,
    shareObject: {
      show: false,
@@ -46,23 +46,17 @@ export default (
    case actionsObjects.SET_LIST:
      return {
        ...state,
-       list: action.objects,
-       marker: action.marker,
-       isTruncated: action.isTruncated
+       list: action.objects
      }
    case actionsObjects.RESET_LIST:
      return {
        ...state,
-       list: [],
-       marker: "",
-       isTruncated: false
+       list: []
      }
-   case actionsObjects.APPEND_LIST:
+   case actionsObjects.SET_LIST_LOADING:
      return {
        ...state,
-       list: [...state.list, ...action.objects],
-       marker: action.marker,
-       isTruncated: action.isTruncated
+       listLoading: action.listLoading
      }
    case actionsObjects.REMOVE:
      return {
@@ -82,9 +76,7 @@ export default (
    case actionsObjects.SET_CURRENT_PREFIX:
      return {
        ...state,
-       currentPrefix: action.prefix,
-       marker: "",
-       isTruncated: false
+       currentPrefix: action.prefix
      }
    case actionsObjects.SET_PREFIX_WRITABLE:
      return {

@@ -33,12 +33,12 @@ export class AbortConfirmModal extends React.Component {
      "abort-upload": true
    })
    let okIcon = classNames({
-     fa: true,
+     fas: true,
      "fa-times": true
    })
    let cancelIcon = classNames({
-     fa: true,
-     "fa-cloud-upload": true
+     fas: true,
+     "fa-cloud-upload-alt": true
    })

    return (
@@ -46,7 +46,7 @@ export class AbortConfirmModal extends React.Component {
      show={true}
      baseClass={baseClass}
      text="Abort uploads in progress?"
-     icon="fa fa-info-circle mci-amber"
+     icon="fas fa-info-circle mci-amber"
      sub="This cannot be undone!"
      okText="Abort"
      okIcon={okIcon}

@@ -14,11 +14,11 @@
 * limitations under the License.
 */

-import { minioBrowserPrefix } from './constants.js'
+import { minioBrowserPrefix, SORT_ORDER_DESC } from "./constants.js"

export const sortObjectsByName = (objects, order) => {
- let folders = objects.filter(object => object.name.endsWith('/'))
- let files = objects.filter(object => !object.name.endsWith('/'))
+ let folders = objects.filter(object => object.name.endsWith("/"))
+ let files = objects.filter(object => !object.name.endsWith("/"))
  folders = folders.sort((a, b) => {
    if (a.name.toLowerCase() < b.name.toLowerCase()) return -1
    if (a.name.toLowerCase() > b.name.toLowerCase()) return 1
@@ -29,7 +29,7 @@ export const sortObjectsByName = (objects, order) => {
    if (a.name.toLowerCase() > b.name.toLowerCase()) return 1
    return 0
  })
- if (order) {
+ if (order === SORT_ORDER_DESC) {
    folders = folders.reverse()
    files = files.reverse()
  }
@@ -37,32 +37,34 @@ export const sortObjectsByName = (objects, order) => {
}

export const sortObjectsBySize = (objects, order) => {
- let folders = objects.filter(object => object.name.endsWith('/'))
- let files = objects.filter(object => !object.name.endsWith('/'))
+ let folders = objects.filter(object => object.name.endsWith("/"))
+ let files = objects.filter(object => !object.name.endsWith("/"))
  files = files.sort((a, b) => a.size - b.size)
- if (order)
-   files = files.reverse()
+ if (order === SORT_ORDER_DESC) files = files.reverse()
  return [...folders, ...files]
}

export const sortObjectsByDate = (objects, order) => {
- let folders = objects.filter(object => object.name.endsWith('/'))
- let files = objects.filter(object => !object.name.endsWith('/'))
- files = files.sort((a, b) => new Date(a.lastModified).getTime() - new Date(b.lastModified).getTime())
- if (order)
-   files = files.reverse()
+ let folders = objects.filter(object => object.name.endsWith("/"))
+ let files = objects.filter(object => !object.name.endsWith("/"))
+ files = files.sort(
+   (a, b) =>
+     new Date(a.lastModified).getTime() - new Date(b.lastModified).getTime()
+ )
+ if (order === SORT_ORDER_DESC) files = files.reverse()
  return [...folders, ...files]
}

-export const pathSlice = (path) => {
- path = path.replace(minioBrowserPrefix, '')
- let prefix = ''
- let bucket = ''
- if (!path) return {
+export const pathSlice = path => {
+ path = path.replace(minioBrowserPrefix, "")
+ let prefix = ""
+ let bucket = ""
+ if (!path)
+   return {
      bucket,
      prefix
    }
- let objectIndex = path.indexOf('/', 1)
+ let objectIndex = path.indexOf("/", 1)
  if (objectIndex == -1) {
    bucket = path.slice(1)
    return {
@@ -79,7 +81,29 @@ export const pathSlice = (path) => {
}

export const pathJoin = (bucket, prefix) => {
- if (!prefix)
-   prefix = ''
- return minioBrowserPrefix + '/' + bucket + '/' + prefix
+ if (!prefix) prefix = ""
+ return minioBrowserPrefix + "/" + bucket + "/" + prefix
}
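
pathSlice and pathJoin are inverses over the browser's URL scheme: one splits a route into bucket and prefix, the other reassembles it. A quick usage sketch (assuming minioBrowserPrefix is "/minio", its usual value in this codebase):

// Round-trip between a route and its (bucket, prefix) parts.
// Assumes minioBrowserPrefix === "/minio".
const { bucket, prefix } = pathSlice("/minio/mybucket/photos/2019/")
// bucket === "mybucket", prefix === "photos/2019/"

const route = pathJoin(bucket, prefix)
// route === "/minio/mybucket/photos/2019/"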

export const getRandomAccessKey = () => {
  const alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
  let arr = new Uint8Array(20)
  window.crypto.getRandomValues(arr)
  const random = Array.prototype.map.call(arr, v => {
    const i = v % alphaNumericTable.length
    return alphaNumericTable.charAt(i)
  })
  return random.join("")
}

export const getRandomSecretKey = () => {
  let arr = new Uint8Array(40)
  window.crypto.getRandomValues(arr)
  const binStr = Array.prototype.map
    .call(arr, v => {
      return String.fromCharCode(v)
    })
    .join("")
  const base64Str = btoa(binStr)
  return base64Str.replace(/\//g, "+").substr(0, 40)
}
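
Both generators draw from window.crypto rather than Math.random, then map the bytes into a printable alphabet: the access key by modular indexing into an alphanumeric table, the secret key by base64-encoding the raw bytes (with "/" swapped for "+") and truncating to 40 characters. A standalone sketch of the same byte-to-alphabet mapping, written Node-style with crypto.randomBytes in place of the browser API (the randomKey helper is illustrative):

const crypto = require("crypto")

// Map random bytes onto a restricted alphabet, as the access-key generator does.
// Note the slight modulo bias: 256 is not a multiple of 36, so early alphabet
// characters are marginally more likely. Acceptable here, where the value is a
// key identifier rather than a password.
const randomKey = (length, alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") => {
  const bytes = crypto.randomBytes(length)
  return Array.from(bytes, b => alphabet[b % alphabet.length]).join("")
}

console.log(randomKey(20)) // e.g. "3F0ZQK8M1T9C2RW7YB5D"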

@@ -72,6 +72,9 @@ class Web {
  Logout() {
    storage.removeItem('token')
  }
  GetToken() {
    return storage.getItem('token')
  }
  ServerInfo() {
    return this.makeCall('ServerInfo')
  }
@@ -99,12 +102,6 @@ class Web {
  RemoveObject(args) {
    return this.makeCall('RemoveObject', args)
  }
- GetAuth() {
-   return this.makeCall('GetAuth')
- }
  GenerateAuth() {
    return this.makeCall('GenerateAuth')
  }
  SetAuth(args) {
    return this.makeCall('SetAuth', args)
      .then(res => {

@@ -8,6 +8,13 @@
  background-color: transparent;
}

.form-control--path {
  color: @link-color;
  padding: 5px 5px 6px 0;
  font-size: 16px;
  .placeholder(@text-muted-color)
}

select.form-control {
  -webkit-appearance: none;
  -moz-appearance: none;
@@ -153,7 +160,7 @@ select.form-control {
  }
  &:after {
    content: "\f05a";
-   font-family: FontAwesome;
+   font-family: 'Font Awesome 5 Free';
    position: absolute;
    top: 17px;
    right: 9px;
@@ -165,6 +172,7 @@ select.form-control {
.ig-search {
  &:before {
    font-family: @font-family-icon;
    font-weight: 900;
    content: '\f002';
    font-size: 15px;
    position: absolute;

@@ -16,6 +16,10 @@
  font-weight: normal;
  margin: 0;

  @media(min-width: (@screen-md-min)) {
    width: calc(100% - 60px);
  }

  & > span {
    margin-bottom: 7px;
    display: inline-block;
@@ -27,7 +31,7 @@
      color: @text-color;
    }
  }
- &:last-child {
+ &:last-of-type {
    &:after {
      content: '/';
      margin: 0 4px;
@@ -43,6 +47,19 @@
}

/*--------------------------
    Edit path
----------------------------*/
.fe-edit {
  font-size: 20px;
  color: @link-color;
  margin-left: 4px;

  i {
    vertical-align: middle;
  }
}

/*--------------------------
    Disk used
----------------------------*/
@@ -132,7 +149,7 @@
  @media(max-width: (@screen-sm-max)) {
    background: url(../../img/more-h-light.svg) no-repeat center;

-   .fa-reorder {
+   .fa-bars {
      display: none;
    }
  }

@@ -43,6 +43,9 @@ header.fesl-row {
      color: @dark-gray;
      font-size: 14px;
    }
    & > .fesli-sort--active {
      .opacity(0.5);
    }

    &:hover:not(.fesl-item-actions) {
      background: lighten(@text-muted-color, 22%);
@@ -110,7 +113,7 @@ div.fesl-row {
      Icons
  ----------------------------*/
  &[data-type=folder] {
-   .list-type(#a1d6dd, '\f114');
+   .list-type(#a1d6dd, '\f07b');

    .fesl-item-name {
      a {
@@ -125,8 +128,8 @@ div.fesl-row {
  &[data-type=excel] { .list-type(#64c866, '\f1c3'); }
  &[data-type=image] { .list-type(#f06292, '\f1c5'); }
  &[data-type=video] { .list-type(#f8c363, '\f1c8'); }
- &[data-type=other] { .list-type(#afafaf, '\f016'); }
- &[data-type=text] { .list-type(#8a8a8a, '\f0f6'); }
+ &[data-type=other] { .list-type(#afafaf, '\f15b'); }
+ &[data-type=text] { .list-type(#8a8a8a, '\f15c'); }
  &[data-type=doc] { .list-type(#2196f5, '\f1c2'); }
  &[data-type=presentation] { .list-type(#896ea6, '\f1c4'); }
@@ -246,6 +249,7 @@ div.fesl-row {

  &:after {
    font-family: @font-family-icon;
    font-weight: 900;
    content: '\f00c';
    top: 8px;
    left: 9px;
@@ -446,7 +450,7 @@ div.fesl-row {
    float: left;
    padding: 4px 0;

-   .fa {
+   .fas {
      font-size: 22px;
      vertical-align: top;
      margin-right: 10px;

@@ -113,4 +113,41 @@
    margin: 0;
    vertical-align: top;
  }
}

.loading {
  position: absolute;
  margin: auto;
  left: 0;
  right: 0;
  top: 0;
  bottom: 0;
  border-top: 1px solid @loading-track-bg;
  border-right: 1px solid @loading-track-bg;
  border-bottom: 1px solid @loading-track-bg;
  border-left: 1px solid @loading-point-bg;
  transform: translateZ(0);
  animation: loading 1.1s infinite linear;
  border-radius: 50%;
  width: 35px;
  height: 35px;
  margin-top: 30px;
}

@-webkit-keyframes loading {
  0% {
    transform: rotate(0deg);
  }
  100% {
    transform: rotate(360deg);
  }
}

@keyframes loading {
  0% {
    transform: rotate(0deg);
  }
  100% {
    transform: rotate(360deg);
  }
}

@@ -190,8 +190,8 @@
----------------------------*/
.toggle-password {
  position: absolute;
- bottom: 30px;
- right: 35px;
+ bottom: 0;
+ right: 0;
  width: 30px;
  height: 30px;
  border: 1px solid #eee;
@@ -206,6 +206,10 @@
    background: #eee;
  }
}

.has-toggle-password {
  position: relative;
}
//--------------------------

@@ -76,7 +76,7 @@
  word-wrap: break-word;

  &:before {
-   font-family: FontAwesome;
+   font-family: 'Font Awesome 5 Free';
    content: '\f0a0';
    font-size: 17px;
    position: absolute;

@@ -2,7 +2,7 @@
    Base
----------------------------*/
@font-family-sans-serif : 'Lato', sans-serif;
-@font-family-icon : 'fontAwesome';
+@font-family-icon : 'Font Awesome 5 Free';
@body-bg : #edecec;
@text-color : #8e8e8e;
@font-size-base : 15px;
@@ -100,4 +100,10 @@
    List
--------------------------*/
@list-row-selected-bg: #fbf2bf;
@list-row-even-bg: #fafafa;

/*--------------------------
    Loading
---------------------------*/
@loading-track-bg: #eeeeee;
@loading-point-bg: #00303f;

browser/package-lock.json (generated, 14239 lines): diff suppressed because it is too large.
@@ -29,8 +29,8 @@
  "homepage": "https://github.com/minio/minio",
  "devDependencies": {
    "async": "^1.5.2",
-   "babel-cli": "^6.14.0",
-   "babel-core": "^6.14.0",
+   "babel-cli": "^6.26.0",
+   "babel-core": "^6.26.3",
    "babel-jest": "^22.1.0",
    "babel-loader": "^7.1.2",
    "babel-plugin-syntax-object-rest-spread": "^6.13.0",
@@ -38,10 +38,10 @@
    "babel-polyfill": "^6.23.0",
    "babel-preset-es2015": "^6.14.0",
    "babel-preset-react": "^6.11.1",
-   "babel-register": "^6.14.0",
+   "babel-register": "^6.26.0",
    "copy-webpack-plugin": "^4.6.0",
    "css-loader": "^0.23.1",
-   "enzyme": "^3.3.0",
+   "enzyme": "^3.10.0",
    "enzyme-adapter-react-16": "^1.1.1",
    "esformatter": "^0.10.0",
    "esformatter-jsx": "^7.4.1",
@@ -59,20 +59,21 @@
    "webpack-dev-server": "^3.1.14"
  },
  "dependencies": {
    "@fortawesome/fontawesome-free": "^5.10.0",
    "bootstrap": "^3.3.6",
    "classnames": "^2.2.3",
    "expect": "^1.20.2",
    "font-awesome": "^4.7.0",
    "glob-all": "^3.1.0",
    "history": "^4.7.2",
    "humanize": "0.0.9",
    "identity-obj-proxy": "^3.0.0",
    "json-loader": "^0.5.4",
    "jwt-decode": "^2.2.0",
    "local-storage-fallback": "^4.0.2",
    "material-design-iconic-font": "^2.2.0",
    "mime-db": "^1.25.0",
    "mime-types": "^2.1.13",
-   "moment": "^2.15.1",
+   "moment": "^2.24.0",
    "react": "^16.2.0",
    "react-addons-test-utils": "^0.14.8",
    "react-bootstrap": "^0.32.0",
@@ -81,7 +82,7 @@
    "react-dom": "^16.2.0",
    "react-dropzone": "^4.2.3",
    "react-infinite-scroller": "^1.0.6",
-   "react-onclickout": "2.0.4",
+   "react-onclickout": "^2.0.8",
    "react-redux": "^5.0.6",
    "react-router-dom": "^4.2.0",
    "redux": "^3.7.2",
File diff suppressed because one or more lines are too long
@@ -10,6 +10,11 @@
    esutils "^2.0.2"
    js-tokens "^3.0.0"

"@fortawesome/fontawesome-free@^5.10.0":
  version "5.10.0"
  resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-free/-/fontawesome-free-5.10.0.tgz#dbfb34093c87c9516f03ee1db3385adae3ae9461"
  integrity sha512-XX16koDMY/tkJmec0VFfKF7RYZOze/203B1iyLnRaAySm3ZPhKaeyIpf73Yh8xhrMk3Fj4TeH3FC01qyFTyg8g==

"@types/node@*":
  version "9.4.0"
  resolved "https://registry.yarnpkg.com/@types/node/-/node-9.4.0.tgz#b85a0bcf1e1cc84eb4901b7e96966aedc6f078d1"
@@ -251,22 +256,10 @@ ajv@^6.1.0:
    json-schema-traverse "^0.4.1"
    uri-js "^4.2.2"

align-text@^0.1.1, align-text@^0.1.3:
  version "0.1.4"
  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
  dependencies:
    kind-of "^3.0.2"
    longest "^1.0.1"
    repeat-string "^1.5.2"

alphanum-sort@^1.0.1, alphanum-sort@^1.0.2:
  version "1.0.2"
  resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3"

amdefine@>=0.0.4:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"

ansi-colors@^3.0.0:
  version "3.2.3"
  resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.3.tgz#57d35b8686e851e2cc04c403f1c00203976a1813"
@@ -378,6 +371,11 @@ array-equal@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"

array-filter@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-1.0.0.tgz#baf79e62e6ef4c2a4c0b831232daffec251f9d83"
  integrity sha1-uveeYubvTCpMC4MSMtr/7CUfnYM=

array-flatten@1.1.1:
  version "1.1.1"
  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
@@ -410,6 +408,15 @@ array-unique@^0.3.2:
  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428"
  integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=

array.prototype.flat@^1.2.1:
  version "1.2.1"
  resolved "https://registry.yarnpkg.com/array.prototype.flat/-/array.prototype.flat-1.2.1.tgz#812db8f02cad24d3fab65dd67eabe3b8903494a4"
  integrity sha512-rVqIs330nLJvfC7JqYvEWwqVr5QjYF1ib02i3YJtR/fICO6527Tjpc/e4Mvmxh3GIePPreRXMdaGyC99YphWEw==
  dependencies:
    define-properties "^1.1.2"
    es-abstract "^1.10.0"
    function-bind "^1.1.1"

arrify@^1.0.0, arrify@^1.0.1:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
@@ -467,7 +474,7 @@ async-limiter@~1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.0.tgz#78faed8c3d074ab81f22b4e985d79e8738f720f8"

async@^1.4.0, async@^1.5.2:
async@^1.5.2:
  version "1.5.2"
  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"

@@ -520,24 +527,25 @@ aws4@^1.6.0:
  version "1.6.0"
  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"

babel-cli@^6.14.0:
  version "6.24.1"
  resolved "https://registry.yarnpkg.com/babel-cli/-/babel-cli-6.24.1.tgz#207cd705bba61489b2ea41b5312341cf6aca2283"
babel-cli@^6.26.0:
  version "6.26.0"
  resolved "https://registry.yarnpkg.com/babel-cli/-/babel-cli-6.26.0.tgz#502ab54874d7db88ad00b887a06383ce03d002f1"
  integrity sha1-UCq1SHTX24itALiHoGODzgPQAvE=
  dependencies:
    babel-core "^6.24.1"
    babel-polyfill "^6.23.0"
    babel-register "^6.24.1"
    babel-runtime "^6.22.0"
    commander "^2.8.1"
    convert-source-map "^1.1.0"
    babel-core "^6.26.0"
    babel-polyfill "^6.26.0"
    babel-register "^6.26.0"
    babel-runtime "^6.26.0"
    commander "^2.11.0"
    convert-source-map "^1.5.0"
    fs-readdir-recursive "^1.0.0"
    glob "^7.0.0"
    lodash "^4.2.0"
    output-file-sync "^1.1.0"
    path-is-absolute "^1.0.0"
    glob "^7.1.2"
    lodash "^4.17.4"
    output-file-sync "^1.1.2"
    path-is-absolute "^1.0.1"
    slash "^1.0.0"
    source-map "^0.5.0"
    v8flags "^2.0.10"
    source-map "^0.5.6"
    v8flags "^2.1.1"
  optionalDependencies:
    chokidar "^1.6.1"

@@ -581,29 +589,30 @@ babel-core@^6.0.0, babel-core@^6.26.0:
    slash "^1.0.0"
    source-map "^0.5.6"

babel-core@^6.14.0, babel-core@^6.24.1:
  version "6.25.0"
  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.25.0.tgz#7dd42b0463c742e9d5296deb3ec67a9322dad729"
babel-core@^6.26.3:
  version "6.26.3"
  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.26.3.tgz#b2e2f09e342d0f0c88e2f02e067794125e75c207"
  integrity sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==
  dependencies:
    babel-code-frame "^6.22.0"
    babel-generator "^6.25.0"
    babel-code-frame "^6.26.0"
    babel-generator "^6.26.0"
    babel-helpers "^6.24.1"
    babel-messages "^6.23.0"
    babel-register "^6.24.1"
    babel-runtime "^6.22.0"
    babel-template "^6.25.0"
    babel-traverse "^6.25.0"
    babel-types "^6.25.0"
    babylon "^6.17.2"
    convert-source-map "^1.1.0"
    debug "^2.1.1"
    json5 "^0.5.0"
    lodash "^4.2.0"
    minimatch "^3.0.2"
    path-is-absolute "^1.0.0"
    private "^0.1.6"
    babel-register "^6.26.0"
    babel-runtime "^6.26.0"
    babel-template "^6.26.0"
    babel-traverse "^6.26.0"
    babel-types "^6.26.0"
    babylon "^6.18.0"
    convert-source-map "^1.5.1"
    debug "^2.6.9"
    json5 "^0.5.1"
    lodash "^4.17.4"
    minimatch "^3.0.4"
    path-is-absolute "^1.0.1"
    private "^0.1.8"
    slash "^1.0.0"
    source-map "^0.5.0"
    source-map "^0.5.7"

babel-generator@^6.18.0, babel-generator@^6.26.0:
  version "6.26.0"
@@ -618,19 +627,6 @@ babel-generator@^6.18.0, babel-generator@^6.26.0:
    source-map "^0.5.6"
    trim-right "^1.0.1"

babel-generator@^6.25.0:
  version "6.25.0"
  resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.25.0.tgz#33a1af70d5f2890aeb465a4a7793c1df6a9ea9fc"
  dependencies:
    babel-messages "^6.23.0"
    babel-runtime "^6.22.0"
    babel-types "^6.25.0"
    detect-indent "^4.0.0"
    jsesc "^1.3.0"
    lodash "^4.2.0"
    source-map "^0.5.0"
    trim-right "^1.0.1"

babel-helper-builder-react-jsx@^6.24.1:
  version "6.24.1"
  resolved "https://registry.yarnpkg.com/babel-helper-builder-react-jsx/-/babel-helper-builder-react-jsx-6.24.1.tgz#0ad7917e33c8d751e646daca4e77cc19377d2cbc"
@@ -997,6 +993,15 @@ babel-polyfill@6.23.0, babel-polyfill@^6.23.0:
    core-js "^2.4.0"
    regenerator-runtime "^0.10.0"

babel-polyfill@^6.26.0:
  version "6.26.0"
  resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.26.0.tgz#379937abc67d7895970adc621f284cd966cf2153"
  integrity sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM=
  dependencies:
    babel-runtime "^6.26.0"
    core-js "^2.5.0"
    regenerator-runtime "^0.10.5"

babel-preset-es2015@^6.14.0:
  version "6.24.1"
  resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.24.1.tgz#d44050d6bc2c9feea702aaf38d727a0210538939"
@@ -1050,18 +1055,6 @@ babel-preset-react@^6.11.1:
    babel-plugin-transform-react-jsx-source "^6.22.0"
    babel-preset-flow "^6.23.0"

babel-register@^6.14.0, babel-register@^6.24.1:
  version "6.24.1"
  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f"
  dependencies:
    babel-core "^6.24.1"
    babel-runtime "^6.22.0"
    core-js "^2.4.0"
    home-or-tmp "^2.0.0"
    lodash "^4.2.0"
    mkdirp "^0.5.1"
    source-map-support "^0.4.2"

babel-register@^6.26.0:
  version "6.26.0"
  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.26.0.tgz#6ed021173e2fcb486d7acb45c6009a856f647071"
@@ -1098,7 +1091,7 @@ babel-template@^6.16.0, babel-template@^6.26.0:
    babylon "^6.18.0"
    lodash "^4.17.4"

babel-template@^6.24.1, babel-template@^6.25.0:
babel-template@^6.24.1:
  version "6.25.0"
  resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.25.0.tgz#665241166b7c2aa4c619d71e192969552b10c071"
  dependencies:
@@ -1509,10 +1502,6 @@ camel-case@3.0.x:
    no-case "^2.2.0"
    upper-case "^1.1.1"

camelcase@^1.0.2:
  version "1.2.1"
  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"

camelcase@^4.1.0:
  version "4.1.0"
  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd"
@@ -1540,13 +1529,6 @@ caseless@~0.12.0:
  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
  integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=

center-align@^0.1.1:
  version "0.1.3"
  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
  dependencies:
    align-text "^0.1.3"
    lazy-cache "^1.0.3"

chain-function@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/chain-function/-/chain-function-1.0.0.tgz#0d4ab37e7e18ead0bdc47b920764118ce58733dc"
@@ -1695,14 +1677,6 @@ cli-width@^2.0.0:
  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.0.tgz#ff19ede8a9a5e579324147b0c11f0fbcbabed639"
  integrity sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=

cliui@^2.1.0:
  version "2.1.0"
  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
  dependencies:
    center-align "^0.1.1"
    right-align "^0.1.1"
    wordwrap "0.0.2"

cliui@^4.0.0:
  version "4.0.0"
  resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.0.0.tgz#743d4650e05f36d1ed2575b59638d87322bfbbcc"
@@ -1802,7 +1776,12 @@ commander@2.17.x, commander@~2.17.1:
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.17.1.tgz#bd77ab7de6de94205ceacc72f1716d29f20a77bf"
  integrity sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==

commander@^2.2.0, commander@^2.8.1, commander@^2.9.0:
commander@^2.11.0, commander@~2.20.0:
  version "2.20.0"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.0.tgz#d58bb2b5c1ee8f87b0d340027e9e94e222c5a422"
  integrity sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==

commander@^2.2.0, commander@^2.9.0:
  version "2.10.0"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.10.0.tgz#e1f5d3245de246d1a5ca04702fa1ad1bd7e405fe"
  dependencies:
@@ -1893,14 +1872,17 @@ content-type@~1.0.4:
  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b"
  integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==

convert-source-map@^1.1.0:
  version "1.5.0"
  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"

convert-source-map@^1.4.0, convert-source-map@^1.5.0:
  version "1.5.1"
  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.1.tgz#b8278097b9bc229365de5c62cf5fcaed8b5599e5"

convert-source-map@^1.5.1:
  version "1.6.0"
  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20"
  integrity sha512-eFu7XigvxdZ1ETfbgPBohgyQ/Z++C0eEhTor0qRwBw9unw+L0/6V8wkSuGgzdThkiS5lSpdptOQPD8Ak40a+7A==
  dependencies:
    safe-buffer "~5.1.1"

cookie-signature@1.0.6:
  version "1.0.6"
  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
@@ -2170,7 +2152,7 @@ date-now@^0.1.4:
  version "0.1.4"
  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"

debug@2.6.9, debug@^2.1.2, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8:
debug@2.6.9, debug@^2.1.2, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8, debug@^2.6.9:
  version "2.6.9"
  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
  dependencies:
@@ -2187,7 +2169,7 @@ debug@^0.7.4:
  version "0.7.4"
  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"

debug@^2.1.1, debug@^2.1.3:
debug@^2.1.3:
  version "2.6.8"
  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.8.tgz#e731531ca2ede27d188222427da17821d68ff4fc"
  dependencies:
@@ -2207,7 +2189,7 @@ debug@^4.1.0:
  dependencies:
    ms "^2.1.1"

decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2, decamelize@^1.2.0:
decamelize@^1.1.1, decamelize@^1.1.2, decamelize@^1.2.0:
  version "1.2.0"
  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
  integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=
@@ -2627,26 +2609,32 @@ enzyme-to-json@^3.3.0:
  dependencies:
    lodash "^4.17.4"

enzyme@^3.3.0:
  version "3.3.0"
  resolved "https://registry.yarnpkg.com/enzyme/-/enzyme-3.3.0.tgz#0971abd167f2d4bf3f5bd508229e1c4b6dc50479"
enzyme@^3.10.0:
  version "3.10.0"
  resolved "https://registry.yarnpkg.com/enzyme/-/enzyme-3.10.0.tgz#7218e347c4a7746e133f8e964aada4a3523452f6"
  integrity sha512-p2yy9Y7t/PFbPoTvrWde7JIYB2ZyGC+NgTNbVEGvZ5/EyoYSr9aG/2rSbVvyNvMHEhw9/dmGUJHWtfQIEiX9pg==
  dependencies:
    array.prototype.flat "^1.2.1"
    cheerio "^1.0.0-rc.2"
    function.prototype.name "^1.0.3"
    has "^1.0.1"
    function.prototype.name "^1.1.0"
    has "^1.0.3"
    html-element-map "^1.0.0"
    is-boolean-object "^1.0.0"
    is-callable "^1.1.3"
    is-callable "^1.1.4"
    is-number-object "^1.0.3"
    is-regex "^1.0.4"
    is-string "^1.0.4"
    is-subset "^0.1.1"
    lodash "^4.17.4"
    object-inspect "^1.5.0"
    lodash.escape "^4.0.1"
    lodash.isequal "^4.5.0"
    object-inspect "^1.6.0"
    object-is "^1.0.1"
    object.assign "^4.1.0"
    object.entries "^1.0.4"
    object.values "^1.0.4"
    raf "^3.4.0"
    rst-selector-parser "^2.2.3"
    string.prototype.trim "^1.1.2"

errno@^0.1.1, errno@^0.1.3, errno@~0.1.7:
  version "0.1.7"
@@ -2662,6 +2650,18 @@ error-ex@^1.2.0:
  dependencies:
    is-arrayish "^0.2.1"

es-abstract@^1.10.0, es-abstract@^1.5.0:
  version "1.13.0"
  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.13.0.tgz#ac86145fdd5099d8dd49558ccba2eaf9b88e24e9"
  integrity sha512-vDZfg/ykNxQVwup/8E1BZhVzFfBxs9NqMzGcvIJrqg5k2/5Za2bWo40dK2J1pgLngZ7c+Shh8lwYtLGyrwPutg==
  dependencies:
    es-to-primitive "^1.2.0"
    function-bind "^1.1.1"
    has "^1.0.3"
    is-callable "^1.1.4"
    is-regex "^1.0.4"
    object-keys "^1.0.12"

es-abstract@^1.5.1:
  version "1.10.0"
  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.10.0.tgz#1ecb36c197842a00d8ee4c2dfd8646bb97d60864"
@@ -2681,7 +2681,7 @@ es-abstract@^1.6.1:
    is-callable "^1.1.3"
    is-regex "^1.0.3"

es-to-primitive@^1.1.1:
es-to-primitive@^1.1.1, es-to-primitive@^1.2.0:
  version "1.2.0"
  resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.0.tgz#edf72478033456e8dda8ef09e00ad9650707f377"
  integrity sha512-qZryBOJjV//LaxLTV6UC//WewneB3LcXOL9NP++ozKVXsIIIpm/2c13UDiD9Jp2eThsecw9m3jPqDwTyobcdbg==
@@ -3235,10 +3235,6 @@ follow-redirects@^1.0.0:
  dependencies:
    debug "=3.1.0"

font-awesome@^4.7.0:
  version "4.7.0"
  resolved "https://registry.yarnpkg.com/font-awesome/-/font-awesome-4.7.0.tgz#8fa8cf0411a1a31afd07b06d2902bb9fc815a133"

for-in@^1.0.1, for-in@^1.0.2:
  version "1.0.2"
  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
@@ -3364,27 +3360,28 @@ fstream-ignore@^1.0.5:
    minimatch "^3.0.0"

fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2:
  version "1.0.11"
  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
  integrity sha1-XB+x8RdHcRTwYyoOtLcbPLD9MXE=
  version "1.0.12"
  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.12.tgz#4e8ba8ee2d48be4f7d0de505455548eae5932045"
  integrity sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==
  dependencies:
    graceful-fs "^4.1.2"
    inherits "~2.0.0"
    mkdirp ">=0.5 0"
    rimraf "2"

function-bind@^1.1.0:
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.0.tgz#16176714c801798e4e8f2cf7f7529467bb4a5771"

function-bind@^1.1.1:
function-bind@^1.0.2, function-bind@^1.1.1:
  version "1.1.1"
  resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
  integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==

function.prototype.name@^1.0.3:
function-bind@^1.1.0:
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.0.tgz#16176714c801798e4e8f2cf7f7529467bb4a5771"

function.prototype.name@^1.1.0:
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.0.tgz#8bd763cc0af860a859cc5d49384d74b932cd2327"
  integrity sha512-Bs0VRrTz4ghD8pTmbJQD1mZ8A/mN0ur/jGz+A6FBxPDUPkm1tNfF6bhTYPA7i7aF4lZJVr+OXTNNrnnIl58Wfg==
  dependencies:
    define-properties "^1.1.2"
    function-bind "^1.1.1"
@@ -3471,7 +3468,19 @@ glob-parent@^3.1.0:
    is-glob "^3.1.0"
    path-dirname "^1.0.0"

glob@^7.0.0, glob@^7.1.1, glob@^7.1.2:
glob@^7.0.3, glob@^7.0.5:
  version "7.1.3"
  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1"
  integrity sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==
  dependencies:
    fs.realpath "^1.0.0"
    inflight "^1.0.4"
    inherits "2"
    minimatch "^3.0.4"
    once "^1.3.0"
    path-is-absolute "^1.0.0"

glob@^7.1.1, glob@^7.1.2:
  version "7.1.2"
  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15"
  dependencies:
@@ -3482,10 +3491,10 @@ glob@^7.0.0, glob@^7.1.1, glob@^7.1.2:
    once "^1.3.0"
    path-is-absolute "^1.0.0"

glob@^7.0.3, glob@^7.0.5, glob@^7.1.3:
  version "7.1.3"
  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1"
  integrity sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==
glob@^7.1.3:
  version "7.1.4"
  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.4.tgz#aa608a2f6c577ad357e1ae5a5c26d9a8d1969255"
  integrity sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==
  dependencies:
    fs.realpath "^1.0.0"
    inflight "^1.0.4"
@@ -3546,11 +3555,7 @@ globby@^7.1.1:
    pify "^3.0.0"
    slash "^1.0.0"

graceful-fs@^4.1.11, graceful-fs@^4.1.4:
  version "4.1.11"
  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"

graceful-fs@^4.1.15, graceful-fs@^4.1.2:
graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.4:
  version "4.1.15"
  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00"
  integrity sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA==
@@ -3570,14 +3575,15 @@ handle-thing@^2.0.0:
  integrity sha512-d4sze1JNC454Wdo2fkuyzCr6aHcbL6PGGuFAz0Li/NcOm1tCHGnWDRmJP85dh9IhQErTc2svWFEX5xHIOo//kQ==

handlebars@^4.0.3:
  version "4.0.11"
  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.11.tgz#630a35dfe0294bc281edae6ffc5d329fc7982dcc"
  version "4.1.2"
  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.1.2.tgz#b6b37c1ced0306b221e094fc7aca3ec23b131b67"
  integrity sha512-nvfrjqvt9xQ8Z/w0ijewdD/vvWDTOweBUm96NTr66Wfvo1mJenBLwcYmPs3TIBP5ruzYGD7Hx/DaM9RmhroGPw==
  dependencies:
    async "^1.4.0"
    neo-async "^2.6.0"
    optimist "^0.6.1"
    source-map "^0.4.4"
    source-map "^0.6.1"
  optionalDependencies:
    uglify-js "^2.6"
    uglify-js "^3.1.4"

har-schema@^1.0.5:
  version "1.0.5"
@@ -3667,7 +3673,7 @@ has-values@^1.0.0:
    is-number "^3.0.0"
    kind-of "^4.0.0"

has@^1.0.1:
has@^1.0.1, has@^1.0.3:
  version "1.0.3"
  resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796"
  integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==
@@ -3781,6 +3787,13 @@ html-comment-regex@^1.1.0:
  version "1.1.1"
  resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.1.tgz#668b93776eaae55ebde8f3ad464b307a4963625e"

html-element-map@^1.0.0:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/html-element-map/-/html-element-map-1.0.1.tgz#3c4fcb4874ebddfe4283b51c8994e7713782b592"
  integrity sha512-BZSfdEm6n706/lBfXKWa4frZRZcT5k1cOusw95ijZsHlI+GdgY0v95h6IzO3iIDf2ROwq570YTwqNPqHcNMozw==
  dependencies:
    array-filter "^1.0.0"

html-encoding-sniffer@^1.0.2:
  version "1.0.2"
  resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8"
@@ -4907,6 +4920,11 @@ jsprim@^1.2.2:
    json-schema "0.2.3"
    verror "1.10.0"

jwt-decode@^2.2.0:
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/jwt-decode/-/jwt-decode-2.2.0.tgz#7d86bd56679f58ce6a84704a657dd392bba81a79"
  integrity sha1-fYa9VmefWM5qhHBKZX3TkruoGnk=

keycode@^2.1.2:
  version "2.1.9"
  resolved "https://registry.yarnpkg.com/keycode/-/keycode-2.1.9.tgz#964a23c54e4889405b4861a5c9f0480d45141dfa"
@@ -4940,10 +4958,6 @@ kind-of@^6.0.0, kind-of@^6.0.2:
  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051"
  integrity sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==

lazy-cache@^1.0.3:
  version "1.0.4"
  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"

lcid@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
@@ -5062,8 +5076,9 @@ locate-path@^3.0.0:
    path-exists "^3.0.0"

lodash-es@^4.2.0, lodash-es@^4.2.1:
  version "4.17.4"
  resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.4.tgz#dcc1d7552e150a0640073ba9cb31d70f032950e7"
  version "4.17.14"
  resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.14.tgz#12a95a963cc5955683cee3b74e85458954f37ecc"
  integrity sha512-7zchRrGa8UZXjD/4ivUWP1867jDkhzTG2c/uj739utSd7O/pFFdxspCemIFKEEjErbcqRzn8nKnGsi7mvTgRPA==

lodash._baseisequal@^3.0.0:
  version "3.0.7"
@@ -5109,6 +5124,11 @@ lodash.deburr@^3.0.0:
  dependencies:
    lodash._root "^3.0.0"

lodash.escape@^4.0.1:
  version "4.0.1"
  resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-4.0.1.tgz#c9044690c21e04294beaa517712fded1fa88de98"
  integrity sha1-yQRGkMIeBClL6qUXcS/e0fqI3pg=

lodash.flattendeep@^4.4.0:
  version "4.4.0"
  resolved "https://registry.yarnpkg.com/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz#fb030917f86a3134e5bc9bec0d69e0013ddfedb2"
@@ -5128,6 +5148,11 @@ lodash.isequal@^3.0:
    lodash._baseisequal "^3.0.0"
    lodash._bindcallback "^3.0.0"

lodash.isequal@^4.5.0:
  version "4.5.0"
  resolved "https://registry.yarnpkg.com/lodash.isequal/-/lodash.isequal-4.5.0.tgz#415c4478f2bcc30120c22ce10ed3226f7d3e18e0"
  integrity sha1-QVxEePK8wwEgwizhDtMib30+GOA=

lodash.isplainobject@^4.0.6:
  version "4.0.6"
  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz#7c526a52d89b45c45cc690b88163be0497f550cb"
@@ -5162,24 +5187,16 @@ lodash.words@^3.0.0:
  dependencies:
    lodash._root "^3.0.0"

lodash@^4.13.1, lodash@^4.14.0, lodash@^4.15.0, lodash@^4.17.4, lodash@^4.2.0, lodash@^4.2.1:
  version "4.17.4"
  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"

lodash@^4.17.3, lodash@^4.17.5, lodash@^4.3.0:
  version "4.17.11"
  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d"
  integrity sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==
lodash@^4.13.1, lodash@^4.14.0, lodash@^4.15.0, lodash@^4.17.3, lodash@^4.17.4, lodash@^4.17.5, lodash@^4.2.0, lodash@^4.2.1, lodash@^4.3.0:
  version "4.17.14"
  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.14.tgz#9ce487ae66c96254fe20b599f21b6816028078ba"
  integrity sha512-mmKYbW3GLuJeX+iGP+Y7Gp1AiGHGbXHCOh/jZmrawMmsE7MS4znI3RL2FsjbqOyMayHInjOeykW7PEajUk1/xw==

loglevel@^1.4.1:
  version "1.6.1"
  resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.1.tgz#e0fc95133b6ef276cdc8887cdaf24aa6f156f8fa"
  integrity sha1-4PyVEztu8nbNyIh82vJKpvFW+Po=

longest@^1.0.1:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"

loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1:
  version "1.3.1"
  resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848"
@@ -5220,8 +5237,9 @@ lru-cache@^5.1.1:
    yallist "^3.0.2"

macaddress@^0.2.8:
  version "0.2.8"
  resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.8.tgz#5904dc537c39ec6dbefeae902327135fa8511f12"
  version "0.2.9"
  resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.9.tgz#3579b8b9acd5b96b4553abf0f394185a86813cb3"
  integrity sha512-k4F1JUof6cQXxNFzx3thLby4oJzXTXQueAOOts944Vqizn+Rjc2QNFenT9FJSLU1CH3PmrHRSyZs2E+Cqw+P2w==

make-dir@^1.0.0:
  version "1.3.0"
@@ -5441,7 +5459,7 @@ minimatch@^3.0.0, minimatch@^3.0.2, minimatch@^3.0.3, minimatch@^3.0.4:
  dependencies:
    brace-expansion "^1.1.7"

minimist@0.0.8, minimist@~0.0.1:
minimist@0.0.8:
  version "0.0.8"
  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
  integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=
@@ -5456,6 +5474,11 @@ minimist@^0.1.0:
  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.1.0.tgz#99df657a52574c21c9057497df742790b2b4c0de"
  integrity sha1-md9lelJXTCHJBXSX33QnkLK0wN4=

minimist@~0.0.1:
  version "0.0.10"
  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf"
  integrity sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8=

minipass@^2.2.1, minipass@^2.3.4:
  version "2.3.5"
  resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.5.tgz#cacebe492022497f656b0f0f51e2682a9ed2d848"
@@ -5517,9 +5540,10 @@ mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkd
  dependencies:
    minimist "0.0.8"

moment@^2.15.1:
  version "2.18.1"
  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
moment@^2.24.0:
  version "2.24.0"
  resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b"
  integrity sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg==

"mout@>=0.9 <2.0":
  version "1.0.0"
@@ -5617,10 +5641,10 @@ negotiator@0.6.1:
  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
  integrity sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=

neo-async@^2.5.0:
  version "2.6.0"
  resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.0.tgz#b9d15e4d71c6762908654b5183ed38b753340835"
  integrity sha512-MFh0d/Wa7vkKO3Y3LlacqAEeHK0mckVqzDieUKTT+KGxi+zIpeVsFxymkIiRpbpDziHc290Xr9A1O4Om7otoRA==
neo-async@^2.5.0, neo-async@^2.6.0:
  version "2.6.1"
  resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c"
  integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==

nice-try@^1.0.4:
  version "1.0.5"
@@ -5848,8 +5872,9 @@ number-is-nan@^1.0.0:
  integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=

nwmatcher@^1.4.3:
  version "1.4.3"
  resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.4.3.tgz#64348e3b3d80f035b40ac11563d278f8b72db89c"
  version "1.4.4"
  resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.4.4.tgz#2285631f34a95f0d0395cd900c96ed39b58f346e"
  integrity sha512-3iuY4N5dhgMpCUrOVnuAdGrgxVqV2cJpM+XNccjR2DKOB1RUP0aA+wGXEiNziG/UKboFyGBIoKOaNlJxx8bciQ==

oauth-sign@~0.8.1, oauth-sign@~0.8.2:
  version "0.8.2"
@@ -5872,9 +5897,10 @@ object-inspect@^1.1.0:
  version "1.2.2"
  resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.2.2.tgz#c82115e4fcc888aea14d64c22e4f17f6a70d5e5a"

object-inspect@^1.5.0:
  version "1.5.0"
  resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.5.0.tgz#9d876c11e40f485c79215670281b767488f9bfe3"
object-inspect@^1.6.0:
  version "1.6.0"
  resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.6.0.tgz#c70b6cbf72f274aab4c34c0c82f5167bf82cf15b"
  integrity sha512-GJzfBZ6DgDAmnuaM3104jR4s1Myxr3Y3zfIyN4z3UdqN69oSRacNK8UhnobDdC+7J2AHCjGwxQubNJfE70SXXQ==

object-is@^1.0.1:
  version "1.0.1"
@@ -5965,6 +5991,7 @@ on-headers@~1.0.1:
once@^1.3.0, once@^1.3.1, once@^1.3.3, once@^1.4.0:
  version "1.4.0"
  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
  integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E=
  dependencies:
    wrappy "1"

@@ -6005,6 +6032,7 @@ opn@^5.1.0:
optimist@^0.6.1:
  version "0.6.1"
  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
  integrity sha1-2j6nRob6IaGaERwybpDrFaAZZoY=
  dependencies:
    minimist "~0.0.1"
    wordwrap "~0.0.2"
@@ -6066,9 +6094,10 @@ osenv@^0.1.4:
    os-homedir "^1.0.0"
    os-tmpdir "^1.0.0"

output-file-sync@^1.1.0:
output-file-sync@^1.1.2:
  version "1.1.2"
  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
  integrity sha1-0KM+7+YaIF+suQCS6CZZjVJFznY=
  dependencies:
    graceful-fs "^4.1.4"
    mkdirp "^0.5.1"
@@ -6642,7 +6671,7 @@ private@^0.1.6:
  version "0.1.7"
  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"

private@^0.1.7:
private@^0.1.7, private@^0.1.8:
  version "0.1.8"
  resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff"

@@ -7153,7 +7182,7 @@ regenerate@^1.2.1:
  version "1.3.2"
  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"

regenerator-runtime@^0.10.0:
regenerator-runtime@^0.10.0, regenerator-runtime@^0.10.5:
  version "0.10.5"
  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658"

@@ -7386,25 +7415,13 @@ ret@~0.1.10:
  version "0.1.15"
  resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc"

right-align@^0.1.1:
  version "0.1.3"
  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
  dependencies:
    align-text "^0.1.1"

rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.6.1, rimraf@^2.6.2:
rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.5.4, rimraf@^2.6.1, rimraf@^2.6.2:
  version "2.6.3"
  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab"
  integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==
  dependencies:
    glob "^7.1.3"

rimraf@^2.5.4:
  version "2.6.2"
  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36"
  dependencies:
    glob "^7.0.5"

ripemd160@^2.0.0, ripemd160@^2.0.1:
  version "2.0.1"
  resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.1.tgz#0f4584295c53a3628af7e6d79aca21ce57d1c6e7"
@@ -7774,12 +7791,6 @@ source-map-support@^0.4.15:
  dependencies:
    source-map "^0.5.6"

source-map-support@^0.4.2:
  version "0.4.15"
  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.15.tgz#03202df65c06d2bd8c7ec2362a193056fef8d3b1"
  dependencies:
    source-map "^0.5.6"

source-map-support@^0.5.0:
  version "0.5.3"
  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.3.tgz#2b3d5fff298cfa4d1afd7d4352d569e9a0158e76"
@@ -7799,17 +7810,7 @@ source-map-url@^0.4.0:
  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3"
  integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=

source-map@^0.4.4:
  version "0.4.4"
  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
  dependencies:
    amdefine ">=0.0.4"

source-map@^0.5.0, source-map@^0.5.3:
  version "0.5.6"
  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"

source-map@^0.5.6, source-map@~0.5.1, source-map@~0.5.6:
source-map@^0.5.3, source-map@^0.5.6, source-map@^0.5.7, source-map@~0.5.6:
  version "0.5.7"
  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc"
  integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=
@@ -7817,6 +7818,7 @@ source-map@^0.5.6, source-map@~0.5.1, source-map@~0.5.6:
source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1:
  version "0.6.1"
  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
  integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==

spdx-correct@^3.0.0:
  version "3.1.0"
@@ -8002,6 +8004,15 @@ string-width@^2.0.0:
    is-fullwidth-code-point "^2.0.0"
    strip-ansi "^3.0.0"

string.prototype.trim@^1.1.2:
  version "1.1.2"
  resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.1.2.tgz#d04de2c89e137f4d7d206f086b5ed2fae6be8cea"
  integrity sha1-0E3iyJ4Tf019IG8Ia17S+ua+jOo=
  dependencies:
    define-properties "^1.1.2"
    es-abstract "^1.5.0"
    function-bind "^1.0.2"

string_decoder@^1.0.0, string_decoder@~1.0.3:
  version "1.0.3"
  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.3.tgz#0fc67d7c141825de94282dd536bec6b9bce860ab"
@@ -8028,15 +8039,11 @@ string_decoder@~1.1.1:
  dependencies:
    safe-buffer "~5.1.0"

stringstream@~0.0.4:
stringstream@~0.0.4, stringstream@~0.0.5:
  version "0.0.6"
  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.6.tgz#7880225b0d4ad10e30927d167a1d6f2fd3b33a72"
  integrity sha512-87GEBAkegbBcweToUrdzf3eLhWNg06FJTebl4BVJz/JgWy8CvEr9dRtX5qWphiynMSQlxxi+QqN0z5T32SLlhA==

stringstream@~0.0.5:
  version "0.0.5"
  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"

strip-ansi@^3.0.0, strip-ansi@^3.0.1:
  version "3.0.1"
  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
@@ -8400,18 +8407,13 @@ uglify-js@3.4.x:
    commander "~2.17.1"
    source-map "~0.6.1"

uglify-js@^2.6:
  version "2.8.29"
  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.29.tgz#29c5733148057bb4e1f75df35b7a9cb72e6a59dd"
uglify-js@^3.1.4:
  version "3.6.0"
  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.6.0.tgz#704681345c53a8b2079fb6cec294b05ead242ff5"
  integrity sha512-W+jrUHJr3DXKhrsS7NUVxn3zqMOFn0hL/Ei6v0anCIMoKC93TjcflTagwIHLW7SfMFfiQuktQyFVCFHGUE0+yg==
  dependencies:
    source-map "~0.5.1"
    yargs "~3.10.0"
  optionalDependencies:
    uglify-to-browserify "~1.0.0"

uglify-to-browserify@~1.0.0:
  version "1.0.2"
  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
    commander "~2.20.0"
    source-map "~0.6.1"

uid-number@^0.0.6:
  version "0.0.6"
@@ -8584,9 +8586,10 @@ v8-compile-cache@^2.0.2:
  resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.0.2.tgz#a428b28bb26790734c4fc8bc9fa106fccebf6a6c"
  integrity sha512-1wFuMUIM16MDJRCrpbpuEPTUGmM5QMUg0cr3KFwra2XgOgFcPGDQHDh3CszSCD2Zewc/dh/pamNEW8CbfDebUw==

v8flags@^2.0.10:
v8flags@^2.1.1:
  version "2.1.1"
  resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-2.1.1.tgz#aab1a1fa30d45f88dd321148875ac02c0b55e5b4"
  integrity sha1-qrGh+jDUX4jdMhFIh1rALAtV5bQ=
  dependencies:
    user-home "^1.1.1"

@@ -8847,17 +8850,10 @@ wide-align@^1.1.0:
  dependencies:
    string-width "^1.0.2 || 2"

window-size@0.1.0:
  version "0.1.0"
  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"

wordwrap@0.0.2:
  version "0.0.2"
  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"

wordwrap@~0.0.2:
  version "0.0.3"
  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
  integrity sha1-o9XabNXAvAAI03I0u68b7WMFkQc=

wordwrap@~1.0.0:
  version "1.0.0"
@@ -9011,12 +9007,3 @@ yargs@~1.2.6:
  integrity sha1-nHtKgv1dWVsr8Xq23MQxNUMv40s=
  dependencies:
    minimist "^0.1.0"

yargs@~3.10.0:
  version "3.10.0"
  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
  dependencies:
    camelcase "^1.0.2"
    cliui "^2.1.0"
    decamelize "^1.0.0"
    window-size "0.1.0"

@@ -119,7 +119,7 @@ assert_is_supported_os() {

assert_check_golang_env() {
    if ! which go >/dev/null 2>&1; then
        echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://docs.min.io/docs/how-to-install-golang"
        echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://golang.org/doc/install"
        exit 1
    fi


@@ -23,6 +23,7 @@ function _build() {
    export GOOS=$os
    export GOARCH=$arch
    export GO111MODULE=on
    export GOPROXY=https://proxy.golang.org
    go build -tags kqueue -o /dev/null
}


@@ -2,4 +2,4 @@

set -e

GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...
GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...

@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOPROXY=https://proxy.golang.org

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )

@@ -23,6 +23,7 @@ import (
	"encoding/json"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"sort"
@@ -30,12 +31,13 @@ import (
	"strings"
	"time"

	humanize "github.com/dustin/go-humanize"
	"github.com/gorilla/mux"
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"

	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/cpu"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/handlers"
@@ -44,10 +46,12 @@ import (
	"github.com/minio/minio/pkg/mem"
	xnet "github.com/minio/minio/pkg/net"
	"github.com/minio/minio/pkg/quick"
	trace "github.com/minio/minio/pkg/trace"
)

const (
	maxEConfigJSONSize = 262272
	defaultNetPerfSize = 100 * humanize.MiByte
)

// Type-safe query params.
@@ -241,17 +245,11 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
		return
	}

	thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	serverInfo := globalNotificationSys.ServerInfo(ctx)
	// Once we have received all the ServerInfo from peers
	// add the local peer server info as well.
	serverInfo = append(serverInfo, ServerInfo{
		Addr: thisAddr.String(),
		Addr: getHostName(r),
		Data: &ServerInfoData{
			StorageInfo: objectAPI.StorageInfo(ctx),
			ConnStats:   globalConnStats.toServerConnStats(),
@@ -308,6 +306,13 @@ type ServerMemUsageInfo struct {
	HistoricUsage []mem.Usage `json:"historicUsage"`
}

// ServerNetReadPerfInfo network read performance information.
type ServerNetReadPerfInfo struct {
	Addr     string        `json:"addr"`
	ReadPerf time.Duration `json:"readPerf"`
	Error    string        `json:"error,omitempty"`
}

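For reference, a minimal self-contained sketch of how this ServerNetReadPerfInfo struct serializes on the wire. The struct definition is copied from the hunk above; the address and duration are made-up sample values, and note that time.Duration marshals as integer nanoseconds:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Copied from the diff above; reproduced standalone only to show the JSON shape.
type ServerNetReadPerfInfo struct {
	Addr     string        `json:"addr"`
	ReadPerf time.Duration `json:"readPerf"`
	Error    string        `json:"error,omitempty"`
}

func main() {
	info := ServerNetReadPerfInfo{
		Addr:     "10.0.0.5:9000",       // hypothetical peer address
		ReadPerf: 42 * time.Millisecond, // hypothetical measurement
	}
	out, _ := json.Marshal(info)
	// Error is empty, so omitempty drops it; ReadPerf appears in nanoseconds.
	fmt.Println(string(out)) // {"addr":"10.0.0.5:9000","readPerf":42000000}
}
```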
// PerfInfoHandler - GET /minio/admin/v1/performance?perfType={perfType}
// ----------
// Get all performance information based on input type
@@ -322,6 +327,44 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request

	vars := mux.Vars(r)
	switch perfType := vars["perfType"]; perfType {
	case "net":
		var size int64 = defaultNetPerfSize
		if sizeStr, found := vars["size"]; found {
			var err error
			if size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil || size < 0 {
				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
				return
			}
		}

		storage := objectAPI.StorageInfo(ctx)
		if !(storage.Backend.Type == BackendFS || storage.Backend.Type == BackendErasure) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
			return
		}

		addr := r.Host
		if globalIsDistXL {
			addr = GetLocalPeer(globalEndpoints)
		}

		infos := map[string][]ServerNetReadPerfInfo{}
		infos[addr] = globalNotificationSys.NetReadPerfInfo(size)
		for peer, info := range globalNotificationSys.CollectNetPerfInfo(size) {
			infos[peer] = info
		}

		// Marshal API response
		jsonBytes, err := json.Marshal(infos)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with performance information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)

	case "drive":
		info := objectAPI.StorageInfo(ctx)
		if !(info.Backend.Type == BackendFS || info.Backend.Type == BackendErasure) {
@@ -329,7 +372,7 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request
			return
		}
		// Get drive performance details from local server's drive(s)
		dp := localEndpointsDrivePerf(globalEndpoints)
		dp := localEndpointsDrivePerf(globalEndpoints, r)

		// Notify all other MinIO peers to report drive performance numbers
		dps := globalNotificationSys.DrivePerfInfo()
@@ -347,7 +390,7 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request
		writeSuccessResponseJSON(w, jsonBytes)
	case "cpu":
		// Get CPU load details from local server's cpu(s)
		cpu := localEndpointsCPULoad(globalEndpoints)
		cpu := localEndpointsCPULoad(globalEndpoints, r)
		// Notify all other MinIO peers to report cpu load numbers
		cpus := globalNotificationSys.CPULoadInfo()
		cpus = append(cpus, cpu)
@@ -364,7 +407,7 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request
		writeSuccessResponseJSON(w, jsonBytes)
	case "mem":
		// Get mem usage details from local server(s)
		m := localEndpointsMemUsage(globalEndpoints)
		m := localEndpointsMemUsage(globalEndpoints, r)
		// Notify all other MinIO peers to report mem usage numbers
		mems := globalNotificationSys.MemUsageInfo()
		mems = append(mems, m)
@@ -443,18 +486,12 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
		return
	}

	thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	peerLocks := globalNotificationSys.GetLocks(ctx)
	// Once we have received all the locks currently used from peers
	// add the local peer locks list as well.
	localLocks := globalLockServer.ll.DupLockMap()
	peerLocks = append(peerLocks, &PeerLocks{
		Addr:  thisAddr.String(),
		Addr:  getHostName(r),
		Locks: localLocks,
	})

@@ -682,12 +719,12 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
			// Start writing response to client
			started = true
			setCommonHeaders(w)
			w.Header().Set("Content-Type", string(mimeJSON))
			w.Header().Set(xhttp.ContentType, "text/event-stream")
			// Set 200 OK status
			w.WriteHeader(200)
		}
		// Send whitespace and keep connection open
		w.Write([]byte("\n\r"))
		w.Write([]byte(" "))
		w.(http.Flusher).Flush()
	case hr := <-respCh:
		switch hr.apiErr {
@@ -702,20 +739,20 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
		var errorRespJSON []byte
		if hr.errBody == "" {
			errorRespJSON = encodeResponseJSON(getAPIErrorResponse(ctx, hr.apiErr,
				r.URL.Path, w.Header().Get(responseRequestIDKey),
				w.Header().Get(responseDeploymentIDKey)))
				r.URL.Path, w.Header().Get(xhttp.AmzRequestID),
				globalDeploymentID))
		} else {
			errorRespJSON = encodeResponseJSON(APIErrorResponse{
				Code:      hr.apiErr.Code,
				Message:   hr.errBody,
				Resource:  r.URL.Path,
				RequestID: w.Header().Get(responseRequestIDKey),
				HostID:    w.Header().Get(responseDeploymentIDKey),
				RequestID: w.Header().Get(xhttp.AmzRequestID),
				HostID:    globalDeploymentID,
			})
		}
		if !started {
			setCommonHeaders(w)
			w.Header().Set("Content-Type", string(mimeJSON))
			w.Header().Set(xhttp.ContentType, string(mimeJSON))
			w.WriteHeader(hr.apiErr.HTTPStatusCode)
		}
		w.Write(errorRespJSON)
@@ -786,6 +823,49 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	keepConnLive(w, respCh)
}

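The `size` query-parameter handling in the "net" case above follows a common validate-or-default pattern. A minimal standalone sketch of that logic; the function name, signature, and the empty-string check standing in for the "parameter absent" case are illustrative, not part of the MinIO code:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseNetPerfSize mirrors the validation in the "net" case above: fall back
// to the default when the parameter is absent, and reject anything that is
// not a non-negative base-10 integer.
func parseNetPerfSize(sizeStr string, defaultSize int64) (int64, error) {
	if sizeStr == "" {
		return defaultSize, nil
	}
	size, err := strconv.ParseInt(sizeStr, 10, 64)
	if err != nil || size < 0 {
		return 0, fmt.Errorf("invalid size %q", sizeStr)
	}
	return size, nil
}

func main() {
	fmt.Println(parseNetPerfSize("", 100<<20))   // 104857600 <nil> (100 MiB default)
	fmt.Println(parseNetPerfSize("4096", 100<<20)) // 4096 <nil>
	fmt.Println(parseNetPerfSize("-5", 100<<20)) // 0 invalid size "-5"
}
```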
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HealBackgroundStatus")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	// Check if this setup has an erasure coded backend.
	if !globalIsXL {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
		return
	}

	var bgHealStates []madmin.BgHealState

	// Get local heal status first
	bgHealStates = append(bgHealStates, getLocalBackgroundHealStatus())

	if globalIsDistXL {
		// Get heal status from other peers
		peersHealStates := globalNotificationSys.BackgroundHealStatus()
		bgHealStates = append(bgHealStates, peersHealStates...)
	}

	// Aggregate healing result
	var aggregatedHealStateResult = madmin.BgHealState{}
	for _, state := range bgHealStates {
		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
		if aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
		}

	}

	if err := json.NewEncoder(w).Encode(aggregatedHealStateResult); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

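The aggregation loop above is a plain fold over per-node states: sum the scanned-item counts and keep the most recent heal activity. A minimal sketch of that fold, using a local stand-in for madmin.BgHealState reduced to the two fields the loop touches (the field types here are assumptions):

```go
package main

import (
	"fmt"
	"time"
)

// BgHealState is a local stand-in for madmin.BgHealState, reduced to the two
// fields the handler aggregates; the exact field types are assumptions.
type BgHealState struct {
	ScannedItemsCount int64
	LastHealActivity  time.Time
}

// aggregate sums scanned-item counts and keeps the latest heal activity,
// matching the loop in BackgroundHealStatusHandler above.
func aggregate(states []BgHealState) BgHealState {
	var agg BgHealState
	for _, s := range states {
		agg.ScannedItemsCount += s.ScannedItemsCount
		if agg.LastHealActivity.Before(s.LastHealActivity) {
			agg.LastHealActivity = s.LastHealActivity
		}
	}
	return agg
}

func main() {
	now := time.Now()
	out := aggregate([]BgHealState{
		{ScannedItemsCount: 10, LastHealActivity: now.Add(-time.Hour)},
		{ScannedItemsCount: 5, LastHealActivity: now},
	})
	fmt.Println(out.ScannedItemsCount, out.LastHealActivity.Equal(now)) // 15 true
}
```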
// GetConfigHandler - GET /minio/admin/v1/config
|
||||
// Get config.json of this minio setup.
|
||||
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -941,8 +1021,18 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
vars := mux.Vars(r)
|
||||
accessKey := vars["accessKey"]
|
||||
|
||||
if err := globalIAMSys.DeleteUser(accessKey); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to delete user.
|
||||
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -977,6 +1067,157 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
// GetUserInfo - GET /minio/admin/v1/user-info
|
||||
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetUserInfo")
|
||||
|
||||
objectAPI := validateAdminReq(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
name := vars["accessKey"]
|
||||
|
||||
userInfo, err := globalIAMSys.GetUserInfo(name)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(userInfo)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, data)
|
||||
}
|
||||
|
||||
// UpdateGroupMembers - PUT /minio/admin/v1/update-group-members
|
||||
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "UpdateGroupMembers")
|
||||
|
||||
objectAPI := validateAdminReq(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer r.Body.Close()
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var updReq madmin.GroupAddRemove
|
||||
err = json.Unmarshal(data, &updReq)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if updReq.IsRemove {
|
||||
err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
|
||||
} else {
|
||||
err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to load group.
|
||||
for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetGroup - /minio/admin/v1/group?group=mygroup1
|
||||
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetGroup")
|
||||
|
||||
objectAPI := validateAdminReq(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
group := vars["group"]
|
||||
|
||||
gdesc, err := globalIAMSys.GetGroupDescription(group)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := json.Marshal(gdesc)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, body)
|
||||
}
|
||||
|
||||
// ListGroups - GET /minio/admin/v1/groups
|
||||
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListGroups")
|
||||
|
||||
objectAPI := validateAdminReq(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
groups := globalIAMSys.ListGroups()
|
||||
body, err := json.Marshal(groups)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, body)
|
||||
}
|
||||
|
||||
// SetGroupStatus - PUT /minio/admin/v1/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetGroupStatus")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	group := vars["group"]
	status := vars["status"]

	var err error
	if status == statusEnabled {
		err = globalIAMSys.SetGroupStatus(group, true)
	} else if status == statusDisabled {
		err = globalIAMSys.SetGroupStatus(group, false)
	} else {
		err = errInvalidArgument
	}
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload group.
	for _, nerr := range globalNotificationSys.LoadGroup(group) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}
}

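Continuing the sketch above, with the same hedges: madmClnt is the client built earlier, and the method names are assumed to mirror the handlers in this diff rather than verified against madmin.

// List all groups, describe each, then disable one. GroupDesc and
// madmin.GroupDisabled are assumptions inferred from the handlers above.
groups, err := madmClnt.ListGroups()
if err != nil {
	log.Fatalln(err)
}
for _, group := range groups {
	desc, err := madmClnt.GetGroupDescription(group)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("%s: members=%v status=%s", desc.Name, desc.Members, desc.Status)
}
// The handler accepts exactly "enabled" or "disabled" on the wire.
if err := madmClnt.SetGroupStatus("mygroup1", madmin.GroupDisabled); err != nil {
	log.Fatalln(err)
}
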
// SetUserStatus - PUT /minio/admin/v1/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetUserStatus")
@@ -1007,8 +1248,8 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
		return
	}

	// Notify all other MinIO peers to reload users
	for _, nerr := range globalNotificationSys.LoadUsers() {
	// Notify all other MinIO peers to reload user.
	for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
@@ -1066,8 +1307,8 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	// Notify all other MinIO peers to reload users
	for _, nerr := range globalNotificationSys.LoadUsers() {
	// Notify all other MinIO peers to reload user
	for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
@@ -1084,7 +1325,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
		return
	}

	policies, err := globalIAMSys.ListCannedPolicies()
	policies, err := globalIAMSys.ListPolicies()
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -1116,13 +1357,13 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
		return
	}

	if err := globalIAMSys.DeleteCannedPolicy(policyName); err != nil {
	if err := globalIAMSys.DeletePolicy(policyName); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload users
	for _, nerr := range globalNotificationSys.LoadUsers() {
	// Notify all other MinIO peers to delete policy
	for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
@@ -1172,13 +1413,13 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
		return
	}

	if err = globalIAMSys.SetCannedPolicy(policyName, *iamPolicy); err != nil {
	if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload users
	for _, nerr := range globalNotificationSys.LoadUsers() {
	// Notify all other MinIO peers to reload policy
	for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
@@ -1186,9 +1427,9 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
	}
}

// SetUserPolicy - PUT /minio/admin/v1/set-user-policy?accessKey=<access_key>&name=<policy_name>
func (a adminAPIHandlers) SetUserPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetUserPolicy")
// SetPolicyForUserOrGroup - PUT /minio/admin/v1/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetPolicyForUserOrGroup")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
@@ -1196,8 +1437,9 @@ func (a adminAPIHandlers) SetUserPolicy(w http.ResponseWriter, r *http.Request)
	}

	vars := mux.Vars(r)
	accessKey := vars["accessKey"]
	policyName := vars["name"]
	policyName := vars["policyName"]
	entityName := vars["userOrGroup"]
	isGroup := vars["isGroup"] == "true"

	// Deny if WORM is enabled
	if globalWORMEnabled {
@@ -1205,18 +1447,13 @@ func (a adminAPIHandlers) SetUserPolicy(w http.ResponseWriter, r *http.Request)
		return
	}

	// Custom IAM policies not allowed for admin user.
	if accessKey == globalServerConfig.GetCredential().AccessKey {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
	if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalIAMSys.SetUserPolicy(accessKey, policyName); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
	}

	// Notify all other MinIO peers to reload users
	for _, nerr := range globalNotificationSys.LoadUsers() {
	// Notify all other MinIO peers to reload policy
	for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
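
The replacement of SetUserPolicy with PolicyDBSet collapses user and group policy mapping into one call. A minimal sketch of the contract the handler relies on (the real IAMSys implementation lives elsewhere in cmd):

// policyDBSetSketch shows the two call shapes used above: the same policy
// store handles users and groups, distinguished only by the isGroup flag.
func policyDBSetSketch(sys interface {
	PolicyDBSet(name, policy string, isGroup bool) error
}) error {
	// Map a canned policy onto a user...
	if err := sys.PolicyDBSet("appuser", "readwrite", false); err != nil {
		return err
	}
	// ...and onto a group, using the same entry point.
	return sys.PolicyDBSet("mygroup1", "readonly", true)
}
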
@@ -1417,69 +1654,78 @@ func (a adminAPIHandlers) SetConfigKeysHandler(w http.ResponseWriter, r *http.Re
	writeSuccessResponseHeadersOnly(w)
}

// UpdateAdminCredsHandler - POST /minio/admin/v1/config/credential
// ----------
// Update admin credentials in a MinIO server
func (a adminAPIHandlers) UpdateAdminCredentialsHandler(w http.ResponseWriter,
	r *http.Request) {

	ctx := newContext(r, w, "UpdateCredentialsHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
// Returns true if the trace.Info should be traced,
// false if certain conditions are not met.
// - input entry is not of the type *trace.Info*
// - errOnly entries are to be traced, not status code 2xx, 3xx.
// - all entries to be traced, if not trace only S3 API requests.
func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
	trcInfo, ok := entry.(trace.Info)
	if !ok {
		return false
	}

	// Avoid setting new credentials when they are already passed
	// by the environment. Deny if WORM is enabled.
	if globalIsEnvCreds || globalWORMEnabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	trace := trcAll || !hasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
	if errOnly {
		return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
	}
	return trace
}

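A test-style sketch of mustTrace's three rules, using the pkg/trace types as they appear in this tree; field names here are assumptions inferred from the function body above:

func TestMustTraceSketch(t *testing.T) {
	entry := trace.Info{
		ReqInfo:  trace.RequestInfo{Path: "/mybucket/myobject"},
		RespInfo: trace.ResponseInfo{StatusCode: http.StatusServiceUnavailable},
	}
	// S3 API paths are traced even without trcAll.
	if !mustTrace(entry, false, false) {
		t.Error("expected S3 API request to be traced")
	}
	// In errOnly mode only responses >= 400 pass.
	if !mustTrace(entry, false, true) {
		t.Error("expected 5xx response to be traced in errOnly mode")
	}
	// Anything that is not a trace.Info is always rejected.
	if mustTrace("not-a-trace-info", true, false) {
		t.Error("expected non trace.Info entry to be dropped")
	}
}
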
// TraceHandler - POST /minio/admin/v1/trace
// ----------
// The handler sends http trace to the connected HTTP client.
func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HTTPTrace")
	trcAll := r.URL.Query().Get("all") == "true"
	trcErr := r.URL.Query().Get("err") == "true"

	// Validate request signature.
	adminAPIErr := checkAdminRequestAuthType(ctx, r, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	w.Header().Set(xhttp.ContentType, "text/event-stream")

	doneCh := make(chan struct{})
	defer close(doneCh)

	// The trace publisher and peer-trace-client use nonblocking sends and hence do not wait for slow receivers.
	// Use a buffered channel to take care of burst sends or a slow w.Write().
	traceCh := make(chan interface{}, 4000)

	peers, err := getRestClients(getRemoteHosts(globalEndpoints))
	if err != nil {
		return
	}

	globalHTTPTrace.Subscribe(traceCh, doneCh, func(entry interface{}) bool {
		return mustTrace(entry, trcAll, trcErr)
	})

	for _, peer := range peers {
		peer.Trace(traceCh, doneCh, trcAll, trcErr)
	}

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	enc := json.NewEncoder(w)
	for {
		select {
		case entry := <-traceCh:
			if err := enc.Encode(entry); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-keepAliveTicker.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-GlobalServiceDoneCh:
			return
		}
	}

	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxEConfigJSONSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
		return
	}

	password := globalServerConfig.GetCredential().SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err)
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	// Decode request body
	var req madmin.SetCredsReq
	if err = json.Unmarshal(configBytes, &req); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrRequestBodyParse), r.URL)
		return
	}

	creds, err := auth.CreateCredentials(req.AccessKey, req.SecretKey)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Acquire lock before updating global configuration.
	globalServerConfigMu.Lock()
	defer globalServerConfigMu.Unlock()

	// Update local credentials in memory.
	globalServerConfig.SetCredential(creds)

	// Set active creds.
	globalActiveCred = creds

	if err = saveServerConfig(ctx, objectAPI, globalServerConfig); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply to the client before restarting minio server.
	writeSuccessResponseHeadersOnly(w)
}

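The TraceHandler above emits newline-delimited JSON entries and single-space keep-alives, so a plain json.Decoder can consume the stream (the decoder skips the keep-alive whitespace automatically). A hedged client sketch, with admin request signing omitted:

func followTrace(ctx context.Context, traceURL string) error {
	req, err := http.NewRequest(http.MethodGet, traceURL, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for {
		// Each entry is one trace.Info value written by the handler's json.Encoder.
		var entry map[string]interface{}
		if err := dec.Decode(&entry); err != nil {
			return err
		}
		fmt.Println(entry)
	}
}
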
@@ -20,7 +20,6 @@ import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
@@ -84,7 +83,9 @@ var (
			"durable": false,
			"internal": false,
			"noWait": false,
			"autoDeleted": false
			"autoDeleted": false,
			"queueDir": "",
			"queueLimit": 0
		}
	},
	"elasticsearch": {
@@ -92,7 +93,9 @@ var (
			"enable": false,
			"format": "namespace",
			"url": "",
			"index": ""
			"index": "",
			"queueDir": "",
			"queueLimit": 0
		}
	},
	"kafka": {
@@ -100,6 +103,8 @@ var (
			"enable": false,
			"brokers": null,
			"topic": "",
			"queueDir": "",
			"queueLimit": 0,
			"tls": {
				"enable": false,
				"skipVerify": false,
@@ -136,7 +141,9 @@ var (
			"port": "",
			"user": "",
			"password": "",
			"database": ""
			"database": "",
			"queueDir": "",
			"queueLimit": 0
		}
	},
	"nats": {
@@ -149,6 +156,8 @@ var (
			"token": "",
			"secure": false,
			"pingInterval": 0,
			"queueDir": "",
			"queueLimit": 0,
			"streaming": {
				"enable": false,
				"clusterID": "",
@@ -165,7 +174,9 @@ var (
			"tls": {
				"enable": false,
				"skipVerify": false
			}
		},
			"queueDir": "",
			"queueLimit": 0
		}
	},
	"postgresql": {
@@ -178,7 +189,9 @@ var (
			"port": "",
			"user": "",
			"password": "",
			"database": ""
			"database": "",
			"queueDir": "",
			"queueLimit": 0
		}
	},
	"redis": {
@@ -187,13 +200,17 @@ var (
			"format": "namespace",
			"address": "",
			"password": "",
			"key": ""
			"key": "",
			"queueDir": "",
			"queueLimit": 0
		}
	},
	"webhook": {
		"1": {
			"enable": false,
			"endpoint": ""
			"endpoint": "",
			"queueDir": "",
			"queueLimit": 0
		}
	}
},
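
Every notification target in this test fixture gains the same pair of keys. Their meaning, sketched as a hedged struct (the names here are illustrative; the real per-target config types live elsewhere in the tree):

// queueStoreConfig mirrors the two JSON keys added above.
type queueStoreConfig struct {
	// QueueDir, when non-empty, enables an on-disk event store so that
	// events survive while the target (broker, database, webhook) is down.
	QueueDir string `json:"queueDir"`
	// QueueLimit caps the number of undelivered events kept in the store;
	// 0 selects the server default.
	QueueLimit uint64 `json:"queueLimit"`
}
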
@@ -267,7 +284,9 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
	initNSLock(isDistXL)

	// Init global heal state
	initAllHealState(globalIsXL)
	if globalIsXL {
		globalAllHealState = initHealState()
	}

	globalConfigSys = NewConfigSys()

@@ -298,60 +317,6 @@ func (atb *adminXLTestBed) TearDown() {
	resetTestGlobals()
}

func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
	// Create an object myobject under bucket mybucket.
	bucketName := "mybucket"
	err := atb.objLayer.MakeBucketWithLocation(context.Background(), bucketName, "")
	if err != nil {
		t.Fatalf("Failed to make bucket %s - %v", bucketName,
			err)
	}

	// create some objects
	{
		objName := "myobject"
		for i := 0; i < 10; i++ {
			objectName := fmt.Sprintf("%s-%d", objName, i)
			_, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
				mustGetPutObjReader(t, bytes.NewReader([]byte("hello")),
					int64(len("hello")), "", ""), ObjectOptions{})
			if err != nil {
				t.Fatalf("Failed to create %s - %v", objectName,
					err)
			}
		}
	}

	// create a multipart upload (incomplete)
	{
		objName := "mpObject"
		uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
			objName, ObjectOptions{})
		if err != nil {
			t.Fatalf("mp new error: %v", err)
		}

		_, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
			uploadID, 3, mustGetPutObjReader(t, bytes.NewReader(
				[]byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
		if err != nil {
			t.Fatalf("mp put error: %v", err)
		}

	}
}

func (atb *adminXLTestBed) CleanupHealTestData(t *testing.T) {
	bucketName := "mybucket"
	objName := "myobject"
	for i := 0; i < 10; i++ {
		atb.objLayer.DeleteObject(context.Background(), bucketName,
			fmt.Sprintf("%s-%d", objName, i))
	}

	atb.objLayer.DeleteBucket(context.Background(), bucketName)
}

// initTestXLObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer() (ObjectLayer, []string, error) {
@@ -599,86 +564,6 @@ func TestServiceRestartHandler(t *testing.T) {
	testServicesCmdHandler(restartCmd, t)
}

// Test for service set creds management REST API.
func TestServiceSetCreds(t *testing.T) {
	adminTestBed, err := prepareAdminXLTestBed()
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
	}
	defer adminTestBed.TearDown()

	// Initialize admin peers to make admin RPC calls. Note: In a
	// single node setup, this degenerates to a simple function
	// call under the hood.
	globalMinioAddr = "127.0.0.1:9000"

	credentials := globalServerConfig.GetCredential()

	testCases := []struct {
		AccessKey          string
		SecretKey          string
		EnvKeysSet         bool
		ExpectedStatusCode int
	}{
		// Bad secret key
		{"minio", "minio", false, http.StatusBadRequest},
		// Bad secret key set from the env
		{"minio", "minio", true, http.StatusMethodNotAllowed},
		// Good keys set from the env
		{"minio", "minio123", true, http.StatusMethodNotAllowed},
		// Successful operation should be the last one to
		// not change server credentials during tests.
		{"minio", "minio123", false, http.StatusOK},
	}
	for i, testCase := range testCases {
		// Set or unset environment keys
		globalIsEnvCreds = testCase.EnvKeysSet

		// Construct setCreds request body
		body, err := json.Marshal(madmin.SetCredsReq{
			AccessKey: testCase.AccessKey,
			SecretKey: testCase.SecretKey})
		if err != nil {
			t.Fatalf("JSONify err: %v", err)
		}

		ebody, err := madmin.EncryptData(credentials.SecretKey, body)
		if err != nil {
			t.Fatal(err)
		}

		// Construct setCreds request
		req, err := getServiceCmdRequest(setCreds, credentials, ebody)
		if err != nil {
			t.Fatalf("Failed to build service status request %v", err)
		}

		rec := httptest.NewRecorder()

		// Execute request
		adminTestBed.router.ServeHTTP(rec, req)

		// Check if the http code response is expected
		if rec.Code != testCase.ExpectedStatusCode {
			t.Errorf("Test %d: Wrong status code, expected = %d, found = %d", i+1, testCase.ExpectedStatusCode, rec.Code)
			resp, _ := ioutil.ReadAll(rec.Body)
			t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
				http.StatusOK, rec.Code, string(resp))
		}

		// If we got 200 OK, check if new credentials are really set
		if rec.Code == http.StatusOK {
			cred := globalServerConfig.GetCredential()
			if cred.AccessKey != testCase.AccessKey {
				t.Errorf("Test %d: Wrong access key, expected = %s, found = %s", i+1, testCase.AccessKey, cred.AccessKey)
			}
			if cred.SecretKey != testCase.SecretKey {
				t.Errorf("Test %d: Wrong secret key, expected = %s, found = %s", i+1, testCase.SecretKey, cred.SecretKey)
			}
		}
	}
}

// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
@@ -835,9 +720,6 @@ func TestAdminServerInfo(t *testing.T) {
	}

	for _, serverInfo := range results {
		if len(serverInfo.Addr) == 0 {
			t.Error("Expected server address to be non empty")
		}
		if serverInfo.Error != "" {
			t.Errorf("Unexpected error = %v\n", serverInfo.Error)
		}

@@ -96,22 +96,15 @@ type allHealState struct {
	healSeqMap map[string]*healSequence
}

var (
	// global server heal state
	globalAllHealState allHealState
)

// initAllHealState - initialize healing apparatus
func initAllHealState(isErasureMode bool) {
	if !isErasureMode {
		return
	}

	globalAllHealState = allHealState{
// initHealState - initialize healing apparatus
func initHealState() *allHealState {
	healState := &allHealState{
		healSeqMap: make(map[string]*healSequence),
	}

	go globalAllHealState.periodicHealSeqsClean()
	go healState.periodicHealSeqsClean()

	return healState
}

func (ahs *allHealState) periodicHealSeqsClean() {
@@ -138,6 +131,19 @@ func (ahs *allHealState) periodicHealSeqsClean() {
	}
}

// getHealSequenceByToken - Retrieve a heal sequence by token. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence, exists bool) {
	ahs.Lock()
	defer ahs.Unlock()
	for _, healSeq := range ahs.healSeqMap {
		if healSeq.clientToken == token {
			return healSeq, true
		}
	}
	return nil, false
}

// getHealSequence - Retrieve a heal sequence by path. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists bool) {
@@ -305,6 +311,14 @@ type healSequence struct {
	// path is just pathJoin(bucket, objPrefix)
	path string

	// List of entities (format, buckets, objects) to heal
	sourceCh chan string

	// Report healing progress, false if this is a background
	// healing since currently there is no entity which will
	// receive realtime healing status
	reportProgress bool

	// time at which heal sequence was started
	startTime time.Time

@@ -334,6 +348,12 @@ type healSequence struct {
	// the last result index sent to client
	lastSentResultIndex int64

	// Number of total items scanned
	scannedItemsCount int64

	// The time of the last scan/heal activity
	lastHealActivity time.Time

	// Holds the request-info for logging
	ctx context.Context
}
@@ -348,14 +368,15 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
	ctx := logger.SetReqInfo(context.Background(), reqInfo)

	return &healSequence{
		bucket:        bucket,
		objPrefix:     objPrefix,
		path:          pathJoin(bucket, objPrefix),
		startTime:     UTCNow(),
		clientToken:   mustGetUUID(),
		clientAddress: clientAddr,
		forceStarted:  forceStart,
		settings:      hs,
		bucket:         bucket,
		objPrefix:      objPrefix,
		path:           pathJoin(bucket, objPrefix),
		reportProgress: true,
		startTime:      UTCNow(),
		clientToken:    mustGetUUID(),
		clientAddress:  clientAddr,
		forceStarted:   forceStart,
		settings:       hs,
		currentStatus: healSequenceStatus{
			Summary:      healNotStartedStatus,
			HealSettings: hs,
@@ -484,7 +505,11 @@ func (h *healSequence) healSequenceStart() {
	h.currentStatus.StartTime = UTCNow()
	h.currentStatus.updateLock.Unlock()

	go h.traverseAndHeal()
	if h.sourceCh == nil {
		go h.traverseAndHeal()
	} else {
		go h.healFromSourceCh()
	}

	select {
	case err, ok := <-h.traverseAndHealDoneCh:
@@ -519,6 +544,95 @@ func (h *healSequence) healSequenceStart() {
	}
}

func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
	var respCh = make(chan healResult)
	defer close(respCh)
	// Send heal request
	globalBackgroundHealing.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
	// Wait for answer and push result to the client
	res := <-respCh
	if !h.reportProgress {
		return nil
	}
	res.result.Type = healType
	if res.err != nil {
		// Object might have been deleted by the time heal
		// was attempted; we should ignore this object and return success.
		if isErrObjectNotFound(res.err) {
			return nil
		}
		// Only report object error
		if healType != madmin.HealItemObject {
			return res.err
		}
		res.result.Detail = res.err.Error()
	}
	return h.pushHealResultItem(res.result)
}

func (h *healSequence) healItemsFromSourceCh() error {
	h.lastHealActivity = UTCNow()

	// Start healing the config prefix.
	if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
		logger.LogIf(h.ctx, err)
	}

	// Start healing the bucket config prefix.
	if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
		logger.LogIf(h.ctx, err)
	}

	for path := range h.sourceCh {

		var itemType madmin.HealItemType
		switch {
		case path == SlashSeparator:
			itemType = madmin.HealItemMetadata
		case !strings.Contains(path, SlashSeparator):
			itemType = madmin.HealItemBucket
		default:
			itemType = madmin.HealItemObject
		}

		if err := h.queueHealTask(path, itemType); err != nil {
			logger.LogIf(h.ctx, err)
		}

		h.scannedItemsCount++
		h.lastHealActivity = UTCNow()
	}

	return nil
}

func (h *healSequence) healFromSourceCh() {
	if err := h.healItemsFromSourceCh(); err != nil {
		h.traverseAndHealDoneCh <- err
	}
	close(h.traverseAndHealDoneCh)
}

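queueHealTask above relies on a background healing subsystem introduced elsewhere in this changeset. A hedged sketch of the shapes it assumes, inferred purely from the call sites; the authoritative definitions live in the background-heal code:

type healTask struct {
	path       string          // bucket, bucket/object, or SlashSeparator for format
	opts       madmin.HealOpts // h.settings
	responseCh chan healResult // answered exactly once per task
}

type healResult struct {
	result madmin.HealResultItem
	err    error
}
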
func (h *healSequence) healItems() error {
	// Start with format healing
	if err := h.healDiskFormat(); err != nil {
		return err
	}

	// Start healing the config prefix.
	if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
		return err
	}

	// Start healing the bucket config prefix.
	if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
		return err
	}

	// Heal buckets and objects
	return h.healBuckets()
}

// traverseAndHeal - traverses on-disk data and performs healing
// according to settings. At each "safe" point it also checks if an
// external quit signal has been received and quits if so. Since the
@@ -527,31 +641,10 @@ func (h *healSequence) healSequenceStart() {
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
	var err error
	checkErr := func(f func() error) {
		switch {
		case err != nil:
			return
		case h.isQuitting():
	if err := h.healItems(); err != nil {
		if h.isQuitting() {
			err = errHealStopSignalled
			return
		}
		err = f()
	}

	// Start with format healing
	checkErr(h.healDiskFormat)

	// Start healing the config prefix.
	checkErr(h.healMinioSysMeta(minioConfigPrefix))

	// Start healing the bucket config prefix.
	checkErr(h.healMinioSysMeta(bucketConfigPrefix))

	// Heal buckets and objects
	checkErr(h.healBuckets)

	if err != nil {
		h.traverseAndHealDoneCh <- err
	}

@@ -575,17 +668,14 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
		if h.isQuitting() {
			return errHealStopSignalled
		}
		res, herr := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove, h.settings.ScanMode)

		herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
		// Object might have been deleted by the time heal
		// was attempted; we ignore this object and move on.
		if isErrObjectNotFound(herr) {
			return nil
		}
		if herr != nil {
			return herr
		}
		res.Type = madmin.HealItemBucketMetadata
		return h.pushHealResultItem(res)
		return herr
	})
}
}
@@ -603,26 +693,7 @@ func (h *healSequence) healDiskFormat() error {
		return errServerNotInitialized
	}

	res, err := objectAPI.HealFormat(h.ctx, h.settings.DryRun)
	// return any error, ignore error returned when disks have
	// already healed.
	if err != nil && err != errNoHealRequired {
		return errFnHealFromAPIErr(h.ctx, err)
	}

	// Healing succeeded, notify the peers to reload format and re-initialize disks.
	// We do not notify peers if healing was not successful.
	if err == nil {
		for _, nerr := range globalNotificationSys.ReloadFormat(h.settings.DryRun) {
			if nerr.Err != nil {
				logger.GetReqInfo(h.ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(h.ctx, nerr.Err)
			}
		}
	}

	// Push format heal result
	return h.pushHealResultItem(res)
	return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
@@ -664,13 +735,7 @@ func (h *healSequence) healBucket(bucket string) error {
		return errServerNotInitialized
	}

	result, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun, h.settings.Remove)
	// handle heal-bucket error
	if err != nil {
		return err
	}

	if err = h.pushHealResultItem(result); err != nil {
	if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
		return err
	}

@@ -678,7 +743,7 @@ func (h *healSequence) healBucket(bucket string) error {
	if h.objPrefix != "" {
		// Check if an object named as the objPrefix exists,
		// and if so heal it.
		_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
		_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
		if err == nil {
			if err = h.healObject(bucket, h.objPrefix); err != nil {
				return err
@@ -689,8 +754,7 @@ func (h *healSequence) healBucket(bucket string) error {
		return nil
	}

	if err = objectAPI.HealObjects(h.ctx, bucket,
		h.objPrefix, h.healObject); err != nil {
	if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
		return errFnHealFromAPIErr(h.ctx, err)
	}
	return nil
@@ -702,28 +766,11 @@ func (h *healSequence) healObject(bucket, object string) error {
		return errHealStopSignalled
	}

	if globalHTTPServer != nil {
		// Wait at most 1 minute for an in-progress request
		// before proceeding to heal
		waitCount := 60
		// Any requests in progress, delay the heal.
		for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
			waitCount--
			time.Sleep(1 * time.Second)
		}
	}
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return errServerNotInitialized
	}

	hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove, h.settings.ScanMode)
	if isErrObjectNotFound(err) {
		return nil
	}
	if err != nil {
		hri.Detail = err.Error()
	}
	return h.pushHealResultItem(hri)
	return h.queueHealTask(pathJoin(bucket, object), madmin.HealItemObject)
}

@@ -61,6 +61,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
	adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
	adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))

	adminV1Router.Methods(http.MethodPost).Path("/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))

	/// Health operations

	}
@@ -74,8 +76,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

	/// Config operations
	if enableConfigOps {
		// Update credentials
		adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateAdminCredentialsHandler))
		// Get config
		adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
		// Set config
@@ -96,20 +96,38 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

		// Add user IAM
		adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
		adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
			Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
		adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
			Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")

		// Remove policy IAM
		adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")

		// Set user or group policy
		adminV1Router.Methods(http.MethodPut).Path("/set-user-or-group-policy").
			HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
			Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")

		// Remove user IAM
		adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")

		// List users
		adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))

		// User info
		adminV1Router.Methods(http.MethodGet).Path("/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")

		// Add/Remove members from group
		adminV1Router.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))

		// Get Group
		adminV1Router.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")

		// List Groups
		adminV1Router.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))

		// Set Group Status
		adminV1Router.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")

		// List policies
		adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
	}
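
The new set-user-or-group-policy route constrains isGroup to true|false in the mux pattern. A hedged sketch of a matching request, with AWS Signature V4 admin signing omitted:

func buildSetPolicyRequest() (*http.Request, error) {
	q := url.Values{}
	q.Set("policyName", "readwrite")
	q.Set("userOrGroup", "newuser")
	q.Set("isGroup", "false")
	u := url.URL{
		Scheme:   "http",
		Host:     "localhost:9000",
		Path:     "/minio/admin/v1/set-user-or-group-policy",
		RawQuery: q.Encode(),
	}
	// The route only matches http.MethodPut.
	return http.NewRequest(http.MethodPut, u.String(), nil)
}
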
@@ -118,6 +136,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
	// Top locks
	adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))

	// HTTP Trace
	adminV1Router.Methods(http.MethodGet).Path("/trace").HandlerFunc(adminAPI.TraceHandler)
	// If none of the routes match, return error.
	adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
}

@@ -20,13 +20,6 @@ import (
	"encoding/xml"
)

const (
	// Response request id.
	responseRequestIDKey = "x-amz-request-id"
	// Deployment id.
	responseDeploymentIDKey = "x-minio-deployment-id"
)

// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
	ObjectName string `xml:"Key"`

@@ -27,7 +27,7 @@ import (
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"google.golang.org/api/googleapi"

	minio "github.com/minio/minio-go"
	minio "github.com/minio/minio-go/v6"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
@@ -51,6 +51,7 @@ type APIErrorResponse struct {
	Key        string `xml:"Key,omitempty" json:"Key,omitempty"`
	BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
	Resource   string
	Region     string `xml:"Region,omitempty" json:"Region,omitempty"`
	RequestID  string `xml:"RequestId" json:"RequestId"`
	HostID     string `xml:"HostId" json:"HostId"`
}
@@ -65,6 +66,7 @@ const (
	ErrBadDigest
	ErrEntityTooSmall
	ErrEntityTooLarge
	ErrPolicyTooLarge
	ErrIncompleteBody
	ErrInternalError
	ErrInvalidAccessKeyID
@@ -90,6 +92,7 @@ const (
	ErrMissingRequestBodyError
	ErrNoSuchBucket
	ErrNoSuchBucketPolicy
	ErrNoSuchBucketLifecycle
	ErrNoSuchKey
	ErrNoSuchUpload
	ErrNoSuchVersion
@@ -137,6 +140,7 @@ const (
	ErrSlowDown
	ErrInvalidPrefixMarker
	ErrBadRequest
	ErrKeyTooLongError
	// Add new error codes here.

	// SSE-S3 related API errors
@@ -185,6 +189,7 @@ const (
	ErrRequestBodyParse
	ErrObjectExistsAsDirectory
	ErrInvalidObjectName
	ErrInvalidObjectNamePrefixSlash
	ErrInvalidResourceName
	ErrServerNotInitialized
	ErrOperationTimedOut
@@ -198,6 +203,8 @@ const (

	ErrMalformedJSON
	ErrAdminNoSuchUser
	ErrAdminNoSuchGroup
	ErrAdminGroupNotEmpty
	ErrAdminNoSuchPolicy
	ErrAdminInvalidArgument
	ErrAdminInvalidAccessKey
@@ -398,6 +405,11 @@ var errorCodes = errorCodeMap{
		Description:    "Your proposed upload exceeds the maximum allowed object size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrPolicyTooLarge: {
		Code:           "PolicyTooLarge",
		Description:    "Policy exceeds the maximum allowed document size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncompleteBody: {
		Code:           "IncompleteBody",
		Description:    "You did not provide the number of bytes specified by the Content-Length HTTP header.",
@@ -458,6 +470,11 @@ var errorCodes = errorCodeMap{
		Description:    "The bucket policy does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchBucketLifecycle: {
		Code:           "NoSuchBucketLifecycle",
		Description:    "The bucket lifecycle configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchKey: {
		Code:           "NoSuchKey",
		Description:    "The specified key does not exist.",
@@ -675,6 +692,11 @@ var errorCodes = errorCodeMap{
		Description:    "400 BadRequest",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrKeyTooLongError: {
		Code:           "KeyTooLongError",
		Description:    "Your key is too long",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// FIXME: The actual XML error response also contains the header which is missing in the list of signed header parameters.
	ErrUnsignedHeaders: {
@@ -873,21 +895,16 @@ var errorCodes = errorCodeMap{
		Description:    "Object name already exists as a directory.",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrReadQuorum: {
		Code:           "XMinioReadQuorum",
		Description:    "Multiple disk failures, unable to reconstruct data.",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrWriteQuorum: {
		Code:           "XMinioWriteQuorum",
		Description:    "Multiple disk failures, unable to write data.",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrInvalidObjectName: {
		Code:           "XMinioInvalidObjectName",
		Description:    "Object name contains unsupported characters.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidObjectNamePrefixSlash: {
		Code:           "XMinioInvalidObjectName",
		Description:    "Object name contains a leading slash.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidResourceName: {
		Code:           "XMinioInvalidResourceName",
		Description:    "Resource name contains bad components such as \"..\" or \".\".",
@@ -908,6 +925,16 @@ var errorCodes = errorCodeMap{
		Description:    "The specified user does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminNoSuchGroup: {
		Code:           "XMinioAdminNoSuchGroup",
		Description:    "The specified group does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminGroupNotEmpty: {
		Code:           "XMinioAdminGroupNotEmpty",
		Description:    "The specified group is not empty - cannot remove it.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminNoSuchPolicy: {
		Code:           "XMinioAdminNoSuchPolicy",
		Description:    "The canned policy does not exist.",
@@ -1479,13 +1506,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
	if err == nil {
		return ErrNone
	}

	// Verify if the underlying error is signature mismatch.
	switch err {
	case errInvalidArgument:
		apiErr = ErrAdminInvalidArgument
	case errNoSuchUser:
		apiErr = ErrAdminNoSuchUser
	case errNoSuchGroup:
		apiErr = ErrAdminNoSuchGroup
	case errGroupNotEmpty:
		apiErr = ErrAdminGroupNotEmpty
	case errNoSuchPolicy:
		apiErr = ErrAdminNoSuchPolicy
	case errSignatureMismatch:
@@ -1496,6 +1526,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrEntityTooLarge
	case errDataTooSmall:
		apiErr = ErrEntityTooSmall
	case errAuthentication:
		apiErr = ErrAccessDenied
	case auth.ErrInvalidAccessKeyLength:
		apiErr = ErrAdminInvalidAccessKey
	case auth.ErrInvalidSecretKeyLength:
@@ -1529,6 +1561,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrKMSAuthFailure
	case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
		apiErr = ErrOperationTimedOut
	case errDiskNotFound:
		apiErr = ErrSlowDown
	}

	// Compression errors
@@ -1581,14 +1615,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrMethodNotAllowed
	case ObjectNameInvalid:
		apiErr = ErrInvalidObjectName
	case ObjectNamePrefixAsSlash:
		apiErr = ErrInvalidObjectNamePrefixSlash
	case InvalidUploadID:
		apiErr = ErrNoSuchUpload
	case InvalidPart:
		apiErr = ErrInvalidPart
	case InsufficientWriteQuorum:
		apiErr = ErrWriteQuorum
		apiErr = ErrSlowDown
	case InsufficientReadQuorum:
		apiErr = ErrReadQuorum
		apiErr = ErrSlowDown
	case UnsupportedDelimiter:
		apiErr = ErrNotImplemented
	case InvalidMarkerPrefixCombination:
@@ -1615,6 +1651,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrUnsupportedMetadata
	case BucketPolicyNotFound:
		apiErr = ErrNoSuchBucketPolicy
	case BucketLifecycleNotFound:
		apiErr = ErrNoSuchBucketLifecycle
	case *event.ErrInvalidEventName:
		apiErr = ErrEventNotification
	case *event.ErrInvalidARN:
@@ -1641,6 +1679,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrBackendDown
	case crypto.Error:
		apiErr = ErrObjectTampered
	case ObjectNameTooLong:
		apiErr = ErrKeyTooLongError
	default:
		var ie, iw int
		// This work-around is to handle the issue golang/go#30648
@@ -1738,6 +1778,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
		BucketName: reqInfo.BucketName,
		Key:        reqInfo.ObjectName,
		Resource:   resource,
		Region:     globalServerConfig.GetRegion(),
		RequestID:  requestID,
		HostID:     hostID,
	}

@@ -39,8 +39,8 @@ var toAPIErrorTests = []struct {
	{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
	{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
	{err: InvalidPart{}, errCode: ErrInvalidPart},
	{err: InsufficientReadQuorum{}, errCode: ErrReadQuorum},
	{err: InsufficientWriteQuorum{}, errCode: ErrWriteQuorum},
	{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
	{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
	{err: UnsupportedDelimiter{}, errCode: ErrNotImplemented},
	{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
	{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},

@@ -26,6 +26,7 @@ import (
	"time"

	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
)

// Returns a hexadecimal representation of time at the
@@ -36,13 +37,13 @@ func mustGetRequestID(t time.Time) string {

// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
	w.Header().Set("Server", "MinIO/"+ReleaseTag)
	w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
	// Set `x-amz-bucket-region` only if region is set on the server
	// by default minio uses an empty region.
	if region := globalServerConfig.GetRegion(); region != "" {
		w.Header().Set("X-Amz-Bucket-Region", region)
		w.Header().Set(xhttp.AmzBucketRegion, region)
	}
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set(xhttp.AcceptRanges, "bytes")

	// Remove sensitive information
	crypto.RemoveSensitiveHeaders(w.Header())
@@ -72,23 +73,23 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp

	// Set last modified time.
	lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
	w.Header().Set("Last-Modified", lastModified)
	w.Header().Set(xhttp.LastModified, lastModified)

	// Set Etag if available.
	if objInfo.ETag != "" {
		w.Header()["ETag"] = []string{"\"" + objInfo.ETag + "\""}
		w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
	}

	if objInfo.ContentType != "" {
		w.Header().Set("Content-Type", objInfo.ContentType)
		w.Header().Set(xhttp.ContentType, objInfo.ContentType)
	}

	if objInfo.ContentEncoding != "" {
		w.Header().Set("Content-Encoding", objInfo.ContentEncoding)
		w.Header().Set(xhttp.ContentEncoding, objInfo.ContentEncoding)
	}

	if !objInfo.Expires.IsZero() {
		w.Header().Set("Expires", objInfo.Expires.UTC().Format(http.TimeFormat))
		w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
	}

	// Set all other user defined metadata.
@@ -124,10 +125,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
	}

	// Set content length.
	w.Header().Set("Content-Length", strconv.FormatInt(rangeLen, 10))
	w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
	if rs != nil {
		contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
		w.Header().Set("Content-Range", contentRange)
		w.Header().Set(xhttp.ContentRange, contentRange)
	}

	return nil

@@ -42,6 +42,27 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
	return
}

func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
	errCode = ErrNone

	if values.Get("max-keys") != "" {
		var err error
		if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
			errCode = ErrInvalidMaxKeys
			return
		}
	} else {
		maxkeys = maxObjectList
	}

	prefix = values.Get("prefix")
	marker = values.Get("key-marker")
	delimiter = values.Get("delimiter")
	encodingType = values.Get("encoding-type")
	versionIDMarker = values.Get("version-id-marker")
	return
}

// Parse bucket url queries for ListObjects V2.
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int, encodingType string, errCode APIErrorCode) {
	errCode = ErrNone

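A small usage sketch of the new versions-args parser above, with expected outputs noted in comments:

func exampleListVersionsArgs() {
	values := url.Values{}
	values.Set("prefix", "photos/")
	values.Set("key-marker", "photos/2019")
	values.Set("max-keys", "100")

	prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(values)
	// prefix == "photos/", marker == "photos/2019", delimiter == "",
	// maxkeys == 100, encodingType == "", versionIDMarker == "", errCode == ErrNone.
	// Omitting max-keys entirely would yield maxkeys == maxObjectList.
	_, _, _, _, _, _, _ = prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
}
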
@@ -36,7 +36,7 @@ func TestListObjectsV2Resources(t *testing.T) {
				"prefix":             []string{"photos/"},
				"continuation-token": []string{"token"},
				"start-after":        []string{"start-after"},
				"delimiter":          []string{"/"},
				"delimiter":          []string{SlashSeparator},
				"fetch-owner":        []string{"true"},
				"max-keys":           []string{"100"},
				"encoding-type":      []string{"gzip"},
@@ -44,7 +44,7 @@ func TestListObjectsV2Resources(t *testing.T) {
			prefix:       "photos/",
			token:        "token",
			startAfter:   "start-after",
			delimiter:    "/",
			delimiter:    SlashSeparator,
			fetchOwner:   true,
			maxKeys:      100,
			encodingType: "gzip",
@@ -55,14 +55,14 @@ func TestListObjectsV2Resources(t *testing.T) {
				"prefix":             []string{"photos/"},
				"continuation-token": []string{"token"},
				"start-after":        []string{"start-after"},
				"delimiter":          []string{"/"},
				"delimiter":          []string{SlashSeparator},
				"fetch-owner":        []string{"true"},
				"encoding-type":      []string{"gzip"},
			},
			prefix:       "photos/",
			token:        "token",
			startAfter:   "start-after",
			delimiter:    "/",
			delimiter:    SlashSeparator,
			fetchOwner:   true,
			maxKeys:      1000,
			encodingType: "gzip",
@@ -73,7 +73,7 @@ func TestListObjectsV2Resources(t *testing.T) {
				"prefix":             []string{"photos/"},
				"continuation-token": []string{""},
				"start-after":        []string{"start-after"},
				"delimiter":          []string{"/"},
				"delimiter":          []string{SlashSeparator},
				"fetch-owner":        []string{"true"},
				"encoding-type":      []string{"gzip"},
			},
@@ -130,13 +130,13 @@ func TestListObjectsV1Resources(t *testing.T) {
			values: url.Values{
				"prefix":        []string{"photos/"},
				"marker":        []string{"test"},
				"delimiter":     []string{"/"},
				"delimiter":     []string{SlashSeparator},
				"max-keys":      []string{"100"},
				"encoding-type": []string{"gzip"},
			},
			prefix:       "photos/",
			marker:       "test",
			delimiter:    "/",
			delimiter:    SlashSeparator,
			maxKeys:      100,
			encodingType: "gzip",
		},
@@ -144,12 +144,12 @@ func TestListObjectsV1Resources(t *testing.T) {
			values: url.Values{
				"prefix":        []string{"photos/"},
				"marker":        []string{"test"},
				"delimiter":     []string{"/"},
				"delimiter":     []string{SlashSeparator},
				"encoding-type": []string{"gzip"},
			},
			prefix:       "photos/",
			marker:       "test",
			delimiter:    "/",
			delimiter:    SlashSeparator,
			maxKeys:      1000,
			encodingType: "gzip",
		},

@@ -22,9 +22,11 @@ import (
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/handlers"
)
@@ -42,6 +44,45 @@ type LocationResponse struct {
	Location string `xml:",chardata"`
}

// ListVersionsResponse - format for list bucket versions response.
type ListVersionsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`

	Name      string
	Prefix    string
	KeyMarker string

	// When the response is truncated (the IsTruncated element value in the response
	// is true), you can use the key name in this field as a marker in the subsequent
	// request to get the next set of objects. The server lists objects in alphabetical
	// order. Note: This element is returned only if you have the delimiter request parameter
	// specified. If the response does not include the NextKeyMarker and it is truncated,
	// you can use the value of the last Key in the response as the marker in the
	// subsequent request to get the next set of object keys.
	NextKeyMarker string `xml:"NextKeyMarker,omitempty"`

	// When the number of responses exceeds the value of MaxKeys,
	// NextVersionIdMarker specifies the first object version not
	// returned that satisfies the search criteria. Use this value
	// for the version-id-marker request parameter in a subsequent request.
	NextVersionIDMarker string `xml:"NextVersionIdMarker"`

	// Marks the last version of the Key returned in a truncated response.
	VersionIDMarker string `xml:"VersionIdMarker"`

	MaxKeys   int
	Delimiter string
	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool

	CommonPrefixes []CommonPrefix
	Versions       []ObjectVersion

	// Encoding type used to encode object keys in the response.
	EncodingType string `xml:"EncodingType,omitempty"`
}

// ListObjectsResponse - format for list objects response.
type ListObjectsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
@@ -189,6 +230,14 @@ type Bucket struct {
	CreationDate string // time string of format "2006-01-02T15:04:05.000Z"
}

// ObjectVersion container for object version metadata
type ObjectVersion struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
	Object
	VersionID string `xml:"VersionId"`
	IsLatest  bool
}

// Object container for object metadata
type Object struct {
	Key string
@@ -291,14 +340,14 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
	}
	u := &url.URL{
		Host:   r.Host,
		Path:   path.Join(slashSeparator, bucket, object),
		Path:   path.Join(SlashSeparator, bucket, object),
		Scheme: proto,
	}
	// If domain is set then we need to use bucket DNS style.
	for _, domain := range domains {
		if strings.Contains(r.Host, domain) {
			u.Host = bucket + "." + r.Host
			u.Path = path.Join(slashSeparator, object)
			u.Path = path.Join(SlashSeparator, object)
			break
		}
	}
@@ -326,6 +375,52 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
	return data
}

+// generates a ListBucketVersions response for the said bucket with other enumerated options.
+func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
+	var versions []ObjectVersion
+	var prefixes []CommonPrefix
+	var owner = Owner{}
+	var data = ListVersionsResponse{}
+
+	owner.ID = globalMinioDefaultOwnerID
+	for _, object := range resp.Objects {
+		var content = ObjectVersion{}
+		if object.Name == "" {
+			continue
+		}
+		content.Key = s3EncodeName(object.Name, encodingType)
+		content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
+		if object.ETag != "" {
+			content.ETag = "\"" + object.ETag + "\""
+		}
+		content.Size = object.Size
+		content.StorageClass = object.StorageClass
+		content.Owner = owner
+		content.VersionID = "null"
+		content.IsLatest = true
+		versions = append(versions, content)
+	}
+	data.Name = bucket
+	data.Versions = versions
+
+	data.EncodingType = encodingType
+	data.Prefix = s3EncodeName(prefix, encodingType)
+	data.KeyMarker = s3EncodeName(marker, encodingType)
+	data.Delimiter = s3EncodeName(delimiter, encodingType)
+	data.MaxKeys = maxKeys
+
+	data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
+	data.IsTruncated = resp.IsTruncated
+
+	for _, prefix := range resp.Prefixes {
+		var prefixItem = CommonPrefix{}
+		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
+		prefixes = append(prefixes, prefixItem)
+	}
+	data.CommonPrefixes = prefixes
+	return data
+}
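Aside: the generator above always stamps VersionID "null" and IsLatest true, because the server keeps a single version per object; the payload it builds has the standard S3 ListVersionsResult shape. A minimal, self-contained sketch of that XML shape, using trimmed stand-in types rather than the repository's own structs:

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed stand-ins, only to show the serialized shape.
type version struct {
	Key       string
	VersionId string
	IsLatest  bool
	Size      int64
}

type listVersionsResult struct {
	XMLName     xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult"`
	Name        string
	Prefix      string
	KeyMarker   string
	MaxKeys     int
	IsTruncated bool
	Versions    []version `xml:"Version"`
}

func main() {
	out, _ := xml.MarshalIndent(listVersionsResult{
		Name:    "mybucket",
		MaxKeys: 1000,
		Versions: []version{
			// Mirrors the handler: version IDs are always "null" since
			// multiple versions are not kept.
			{Key: "a.txt", VersionId: "null", IsLatest: true, Size: 42},
		},
	}, "", "  ")
	fmt.Println(string(out))
}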

// generates a ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
	var contents []Object
@@ -521,8 +616,9 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier,
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
	setCommonHeaders(w)
	if mType != mimeNone {
-		w.Header().Set("Content-Type", string(mType))
+		w.Header().Set(xhttp.ContentType, string(mType))
	}
+	w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(response)))
	w.WriteHeader(statusCode)
	if response != nil {
		w.Write(response)
@@ -561,7 +657,7 @@ func writeSuccessNoContent(w http.ResponseWriter) {

// writeRedirectSeeOther writes Location header with http status 303
func writeRedirectSeeOther(w http.ResponseWriter, location string) {
-	w.Header().Set("Location", location)
+	w.Header().Set(xhttp.Location, location)
	writeResponse(w, http.StatusSeeOther, nil, mimeNone)
}
@@ -575,12 +671,12 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
	case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
		// Set retry-after header to indicate user-agents to retry request after 120secs.
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
-		w.Header().Set("Retry-After", "120")
+		w.Header().Set(xhttp.RetryAfter, "120")
	case "AccessDenied":
		// The request is from browser and also if browser
		// is enabled we need to redirect.
-		if browser {
-			w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
+		if browser && globalIsBrowserEnabled {
+			w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
			w.WriteHeader(http.StatusTemporaryRedirect)
			return
		}

@@ -588,7 +684,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError

	// Generate error response.
	errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
-		w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
+		w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
	encodedErrorResponse := encodeResponse(errorResponse)
	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
@@ -601,7 +697,7 @@ func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
// useful for admin APIs.
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
	// Generate error response.
-	errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
+	errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
	encodedErrorResponse := encodeResponseJSON(errorResponse)
	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
@@ -619,8 +715,8 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
		Resource:   reqURL.Path,
		BucketName: reqInfo.BucketName,
		Key:        reqInfo.ObjectName,
-		RequestID:  w.Header().Get(responseRequestIDKey),
-		HostID:     w.Header().Get(responseDeploymentIDKey),
+		RequestID:  w.Header().Get(xhttp.AmzRequestID),
+		HostID:     globalDeploymentID,
	}
	encodedErrorResponse := encodeResponseJSON(errorResponse)
	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
@@ -635,12 +731,12 @@ func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err
	case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
		// Set retry-after header to indicate user-agents to retry request after 120secs.
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
-		w.Header().Set("Retry-After", "120")
+		w.Header().Set(xhttp.RetryAfter, "120")
	case "AccessDenied":
		// The request is from browser and also if browser
		// is enabled we need to redirect.
		if browser && globalIsBrowserEnabled {
-			w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
+			w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
			w.WriteHeader(http.StatusTemporaryRedirect)
			return
		}

@@ -653,8 +749,8 @@ func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err
		Resource:   reqURL.Path,
		BucketName: reqInfo.BucketName,
		Key:        reqInfo.ObjectName,
-		RequestID:  w.Header().Get(responseRequestIDKey),
-		HostID:     w.Header().Get(responseDeploymentIDKey),
+		RequestID:  w.Header().Get(xhttp.AmzRequestID),
+		HostID:     globalDeploymentID,
	}

	encodedErrorResponse := encodeResponse(errorResponse)
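Both error writers set Retry-After for throttling-class errors before emitting the XML body. A hedged, standalone sketch of that pattern using only net/http and encoding/xml (the writeXMLError helper and field set are illustrative, not this codebase's API):

package main

import (
	"encoding/xml"
	"log"
	"net/http"
)

// Minimal S3-style error document.
type apiErrorResponse struct {
	XMLName   xml.Name `xml:"Error"`
	Code      string
	Message   string
	Resource  string
	RequestID string `xml:"RequestId"`
}

func writeXMLError(w http.ResponseWriter, status int, code, msg, resource, reqID string) {
	// Ask well-behaved clients to back off before retrying, as the handlers
	// above do for "SlowDown" and quorum errors.
	if code == "SlowDown" {
		w.Header().Set("Retry-After", "120")
	}
	body, _ := xml.Marshal(apiErrorResponse{Code: code, Message: msg, Resource: resource, RequestID: reqID})
	w.Header().Set("Content-Type", "application/xml")
	w.WriteHeader(status)
	w.Write(append([]byte(xml.Header), body...))
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		writeXMLError(w, http.StatusServiceUnavailable, "SlowDown",
			"Please reduce your request rate.", r.URL.Path, "req-123")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}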
@@ -20,6 +20,7 @@ import (
	"net/http"

	"github.com/gorilla/mux"
+	xhttp "github.com/minio/minio/cmd/http"
)

// objectAPIHandler implements and provides http handlers for S3 API.
@@ -28,10 +29,12 @@ type objectAPIHandlers struct {
	CacheAPI func() CacheObjectLayer
	// Returns true if handlers should interpret encryption.
	EncryptionEnabled func() bool
+	// Returns true if handlers allow SSE-KMS encryption headers.
+	AllowSSEKMS func() bool
}

// registerAPIRouter - registers S3 compatible APIs.
-func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
+func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
	// Initialize API.
	api := objectAPIHandlers{
		ObjectAPI: newObjectLayerFn,
@@ -39,111 +42,124 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
		EncryptionEnabled: func() bool {
			return encryptionEnabled
		},
+		AllowSSEKMS: func() bool {
+			return allowSSEKMS
+		},
	}

	// API Router
-	apiRouter := router.PathPrefix("/").Subrouter()
+	apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
	var routers []*mux.Router
	for _, domainName := range globalDomainNames {
		routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
+		routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
	}
	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

	for _, bucket := range routers {
		// Object operations
		// HeadObject
-		bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler))
+		bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler))
		// CopyObjectPart
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
-		// PutObjectPart
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
-		// ListObjectPxarts
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}")
+		// PutObjectPart
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		// ListObjectParts
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}")
		// CompleteMultipartUpload
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
		// NewMultipartUpload
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "")
+		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "")
		// AbortMultipartUpload
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
		// GetObjectACL - this is a dummy call.
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
		// GetObjectTagging - this is a dummy call.
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
		// SelectObjectContent
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
+		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
		// GetObject
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
		// CopyObject
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler))
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler))
		// PutObject
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler))
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler))
		// DeleteObject
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler))
+		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler))

		/// Bucket operations
		// GetBucketLocation
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "")
		// GetBucketPolicy
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")
		// GetBucketLifecycle
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")

		// Dummy Bucket Calls
		// GetBucketACL -- this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
		// GetBucketCors - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
		// GetBucketWebsiteHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
		// GetBucketVersioningHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
		// GetBucketAccelerateHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
		// GetBucketRequestPaymentHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
		// GetBucketLoggingHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
		// GetBucketLifecycleHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
		// GetBucketReplicationHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
		// GetBucketTaggingHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
		// DeleteBucketWebsiteHandler
-		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
+		bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
		// DeleteBucketTaggingHandler
-		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")
+		bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")

		// GetBucketNotification
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
		// ListenBucketNotification
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}")
		// ListMultipartUploads
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "")
		// ListObjectsV2
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2")
+		// ListBucketVersions
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListBucketObjectVersionsHandler)).Queries("versions", "")
		// ListObjectsV1 (Legacy)
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV1Handler))
		// PutBucketLifecycle
		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketLifecycleHandler)).Queries("lifecycle", "")
		// PutBucketPolicy
		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketPolicyHandler)).Queries("policy", "")

		// PutBucketNotification
-		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "")
+		bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "")
		// PutBucket
-		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketHandler))
+		bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketHandler))
		// HeadBucket
-		bucket.Methods("HEAD").HandlerFunc(httpTraceAll(api.HeadBucketHandler))
+		bucket.Methods(http.MethodHead).HandlerFunc(httpTraceAll(api.HeadBucketHandler))
		// PostPolicy
-		bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
+		bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
		// DeleteMultipleObjects
-		bucket.Methods("POST").HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
+		bucket.Methods(http.MethodPost).HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
		// DeleteBucketPolicy
		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketPolicyHandler)).Queries("policy", "")
		// DeleteBucketLifecycle
		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketLifecycleHandler)).Queries("lifecycle", "")
		// DeleteBucket
-		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketHandler))
+		bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketHandler))
	}

	/// Root operation

	// ListBuckets
-	apiRouter.Methods("GET").Path("/").HandlerFunc(httpTraceAll(api.ListBucketsHandler))
+	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler))

	// If none of the routes match.
	apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
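The registration loop above relies on gorilla/mux host matching to support virtual-host-style buckets, with a second host pattern added to tolerate an explicit port in the Host header. A self-contained sketch of that routing idea (the domain and port are placeholders):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	api := router.PathPrefix("/").Subrouter()

	handler := func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		fmt.Fprintf(w, "bucket=%s object=%s\n", vars["bucket"], vars["object"])
	}

	// Virtual-host style: the bucket name comes from the Host header, with
	// and without an explicit port, as in the registration loop above.
	domain := "example.com"
	for _, host := range []string{"{bucket:.+}." + domain, "{bucket:.+}." + domain + ":{port:.*}"} {
		api.Host(host).Subrouter().Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(handler)
	}
	// Path style fallback: the bucket is the first path segment.
	api.PathPrefix("/{bucket}").Subrouter().Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(handler)

	log.Fatal(http.ListenAndServe(":9000", router))
}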
@@ -29,6 +29,7 @@ import (
	"strings"

	jwtgo "github.com/dgrijalva/jwt-go"
+	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/hash"
@@ -38,41 +39,41 @@ import (

// Verify if request has JWT.
func isRequestJWT(r *http.Request) bool {
-	return strings.HasPrefix(r.Header.Get("Authorization"), jwtAlgorithm)
+	return strings.HasPrefix(r.Header.Get(xhttp.Authorization), jwtAlgorithm)
}

// Verify if request has AWS Signature Version '4'.
func isRequestSignatureV4(r *http.Request) bool {
-	return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm)
+	return strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm)
}

// Verify if request has AWS Signature Version '2'.
func isRequestSignatureV2(r *http.Request) bool {
-	return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
-		strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm))
+	return (!strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm) &&
+		strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV2Algorithm))
}

// Verify if request has AWS PreSign Version '4'.
func isRequestPresignedSignatureV4(r *http.Request) bool {
-	_, ok := r.URL.Query()["X-Amz-Credential"]
+	_, ok := r.URL.Query()[xhttp.AmzCredential]
	return ok
}

// Verify request has AWS PreSign Version '2'.
func isRequestPresignedSignatureV2(r *http.Request) bool {
-	_, ok := r.URL.Query()["AWSAccessKeyId"]
+	_, ok := r.URL.Query()[xhttp.AmzAccessKeyID]
	return ok
}

// Verify if request has AWS Post policy Signature Version '4'.
func isRequestPostPolicySignatureV4(r *http.Request) bool {
-	return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") &&
+	return strings.Contains(r.Header.Get(xhttp.ContentType), "multipart/form-data") &&
		r.Method == http.MethodPost
}

// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
func isRequestSignStreamingV4(r *http.Request) bool {
-	return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 &&
+	return r.Header.Get(xhttp.AmzContentSha256) == streamingContentSHA256 &&
		r.Method == http.MethodPut
}
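The helpers above classify a request by looking only at the Authorization header prefix and two well-known query parameters. A standalone sketch of that classification; the constants mirror the prefixes checked above, while the authScheme helper itself is illustrative:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

const (
	sigV4Prefix = "AWS4-HMAC-SHA256" // same prefix signV4Algorithm carries above
	sigV2Prefix = "AWS"              // same prefix signV2Algorithm carries above
)

func authScheme(r *http.Request) string {
	auth := r.Header.Get("Authorization")
	q := r.URL.Query()
	switch {
	case strings.HasPrefix(auth, sigV4Prefix):
		return "signature-v4"
	// The V2 check must run after V4, since "AWS4-..." also starts with "AWS".
	case strings.HasPrefix(auth, sigV2Prefix):
		return "signature-v2"
	case q.Get("X-Amz-Credential") != "":
		return "presigned-v4"
	case q.Get("AWSAccessKeyId") != "":
		return "presigned-v2"
	}
	return "anonymous"
}

func main() {
	r, _ := http.NewRequest(http.MethodGet,
		"https://play.min.io/bucket?X-Amz-Credential=KEY%2F20190101", nil)
	fmt.Println(authScheme(r)) // presigned-v4
}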
@@ -109,9 +110,9 @@ func getRequestAuthType(r *http.Request) authType {
		return authTypeJWT
	} else if isRequestPostPolicySignatureV4(r) {
		return authTypePostPolicy
-	} else if _, ok := r.URL.Query()["Action"]; ok {
+	} else if _, ok := r.URL.Query()[xhttp.Action]; ok {
		return authTypeSTS
-	} else if _, ok := r.Header["Authorization"]; !ok {
+	} else if _, ok := r.Header[xhttp.Authorization]; !ok {
		return authTypeAnonymous
	}
	return authTypeUnknown
@@ -121,7 +122,7 @@ func getRequestAuthType(r *http.Request) authType {
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
	s3Err := ErrAccessDenied
-	if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
+	if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
		getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
		// We only support admin credentials to access admin APIs.
@@ -148,11 +149,11 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region stri

// Fetch the security token set by the client.
func getSessionToken(r *http.Request) (token string) {
-	token = r.Header.Get("X-Amz-Security-Token")
+	token = r.Header.Get(xhttp.AmzSecurityToken)
	if token != "" {
		return token
	}
-	return r.URL.Query().Get("X-Amz-Security-Token")
+	return r.URL.Query().Get(xhttp.AmzSecurityToken)
}

// Fetch claims in the security token returned by the client, doesn't return
@@ -200,6 +201,39 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
	if _, ok = v.(string); !ok {
		return nil, errInvalidAccessKeyID
	}

+	if globalPolicyOPA == nil {
+		// If OPA is not set, the session token must carry a policy claim;
+		// reject requests without one.
+		p, pok := claims[iampolicy.PolicyName]
+		if !pok {
+			return nil, errAuthentication
+		}
+		if _, pok = p.(string); !pok {
+			return nil, errAuthentication
+		}
+		sp, spok := claims[iampolicy.SessionPolicyName]
+		// Sub policy is optional, if not set return success.
+		if !spok {
+			return claims, nil
+		}
+		// Sub policy is set but is not a string, reject such requests.
+		spStr, spok := sp.(string)
+		if !spok {
+			return nil, errAuthentication
+		}
+		// The sub policy is set and is a string; it is base64 encoded, so
+		// decode it. If decoding fails, reject such requests.
+		spBytes, err := base64.StdEncoding.DecodeString(spStr)
+		if err != nil {
+			// Base64 decoding fails, we should log to indicate
+			// something is malforming the request sent by client.
+			logger.LogIf(context.Background(), err)
+			return nil, errAuthentication
+		}
+		claims[iampolicy.SessionPolicyName] = string(spBytes)
+	}
	return claims, nil
}
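The added block decodes an optional base64-encoded session policy out of the token claims and rejects anything malformed. A minimal sketch of just that claim handling, with stand-in names rather than MinIO's iampolicy keys:

package main

import (
	"encoding/base64"
	"errors"
	"fmt"
)

// decodeSessionPolicy mirrors the logic above: an embedded session policy,
// when present, must be a base64-encoded string; anything else is an
// authentication error. Names here are illustrative.
func decodeSessionPolicy(claims map[string]interface{}, key string) (string, error) {
	v, ok := claims[key]
	if !ok {
		return "", nil // sub policy is optional
	}
	s, ok := v.(string)
	if !ok {
		return "", errors.New("authentication failed: session policy is not a string")
	}
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return "", errors.New("authentication failed: session policy is not valid base64")
	}
	return string(raw), nil
}

func main() {
	claims := map[string]interface{}{
		"sessionPolicy": base64.StdEncoding.EncodeToString([]byte(`{"Version":"2012-10-17"}`)),
	}
	p, err := decodeSessionPolicy(claims, "sessionPolicy")
	fmt.Println(p, err)
}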
@@ -225,14 +259,24 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
+func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
+	_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
+	return s3Err
+}
+
+// Check request auth type verifies the incoming http request
+// - validates the request signature
+// - validates the policy action; if anonymous, tests bucket policies if any,
+//   for authenticated requests validates IAM policies.
+// returns APIErrorCode if any to be replied to the client.
+// Additionally returns the accessKey used in the request, and if this request is by an admin.
+func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
	var cred auth.Credentials
-	var owner bool
	switch getRequestAuthType(r) {
	case authTypeUnknown, authTypeStreamingSigned:
-		return ErrAccessDenied
+		return accessKey, owner, ErrAccessDenied
	case authTypePresignedV2, authTypeSignedV2:
		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
-			return s3Err
+			return accessKey, owner, s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeSigned, authTypePresigned:
@@ -242,17 +286,18 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
			region = ""
		}
		if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
-			return s3Err
+			return accessKey, owner, s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
-		return s3Err
+		return accessKey, owner, s3Err
	}

-	claims, s3Err := checkClaimsFromToken(r, cred)
+	var claims map[string]interface{}
+	claims, s3Err = checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
-		return s3Err
+		return accessKey, owner, s3Err
	}

	// LocationConstraint is valid only for CreateBucketAction.
@@ -262,7 +307,7 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
	payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
	if err != nil {
		logger.LogIf(ctx, err)
-		return ErrMalformedXML
+		return accessKey, owner, ErrMalformedXML
	}

	// Populate payload to extract location constraint.
@@ -271,7 +316,7 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
		var s3Error APIErrorCode
		locationConstraint, s3Error = parseLocationConstraint(r)
		if s3Error != ErrNone {
-			return s3Error
+			return accessKey, owner, s3Error
		}

		// Populate payload again to handle it in HTTP handler.
@@ -287,9 +332,10 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
			IsOwner:    false,
			ObjectName: objectName,
		}) {
-			return ErrNone
+			// Request is allowed, return the appropriate access key.
+			return cred.AccessKey, owner, ErrNone
		}
-		return ErrAccessDenied
+		return accessKey, owner, ErrAccessDenied
	}

	if globalIAMSys.IsAllowed(iampolicy.Args{
@@ -301,9 +347,10 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
		IsOwner: owner,
		Claims:  claims,
	}) {
-		return ErrNone
+		// Request is allowed, return the appropriate access key.
+		return cred.AccessKey, owner, ErrNone
	}
-	return ErrAccessDenied
+	return accessKey, owner, ErrAccessDenied
}
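The refactor above keeps checkRequestAuthType as a thin wrapper over a richer function with named results, so the many early returns stay terse while new callers also get the access key and owner flag. A toy sketch of the same wrapper-plus-named-returns pattern (all names and types here are illustrative):

package main

import "fmt"

type errCode int

const (
	errNone errCode = iota
	errAccessDenied
)

// Richer function: named results keep early returns compact, since
// accessKey and owner simply keep their zero values on failure.
func checkAuth(token string) (accessKey string, owner bool, code errCode) {
	if token == "" {
		return accessKey, owner, errAccessDenied
	}
	return "AKIDEXAMPLE", true, errNone
}

// Old-style entry point preserved for existing callers.
func checkAuthSimple(token string) (code errCode) {
	_, _, code = checkAuth(token)
	return code
}

func main() {
	fmt.Println(checkAuthSimple(""))      // 1 (access denied)
	fmt.Println(checkAuth("valid-token")) // AKIDEXAMPLE true 0
}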
// Verify if request has valid AWS Signature Version '2'.
@@ -337,8 +384,8 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
		contentMD5, contentSHA256 []byte
	)
	// Extract 'Content-Md5' if present.
-	if _, ok := r.Header["Content-Md5"]; ok {
-		contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get("Content-Md5"))
+	if _, ok := r.Header[xhttp.ContentMD5]; ok {
+		contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get(xhttp.ContentMD5))
		if err != nil || len(contentMD5) == 0 {
			return ErrInvalidDigest
		}
@@ -347,14 +394,14 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
	// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
	// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
	if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
-		if sha256Sum, ok := r.URL.Query()["X-Amz-Content-Sha256"]; ok && len(sha256Sum) > 0 {
+		if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
			contentSHA256, err = hex.DecodeString(sha256Sum[0])
			if err != nil {
				return ErrContentSHA256Mismatch
			}
		}
-	} else if _, ok := r.Header["X-Amz-Content-Sha256"]; !skipSHA256 && ok {
-		contentSHA256, err = hex.DecodeString(r.Header.Get("X-Amz-Content-Sha256"))
+	} else if _, ok := r.Header[xhttp.AmzContentSha256]; !skipSHA256 && ok {
+		contentSHA256, err = hex.DecodeString(r.Header.Get(xhttp.AmzContentSha256))
		if err != nil || len(contentSHA256) == 0 {
			return ErrContentSHA256Mismatch
		}
@@ -362,7 +409,8 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty

	// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
	// The verification happens implicitly during reading.
-	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
+	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
+		hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
	if err != nil {
		return toAPIErrorCode(ctx, err)
	}
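Content-Md5 is decoded with the strict base64 decoder, which additionally rejects inputs whose trailing padding bits are not zero. A runnable sketch of computing and validating that header:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	body := []byte("hello world")

	// What a client would send: Content-Md5 is the base64 of the raw MD5 sum.
	sum := md5.Sum(body)
	header := base64.StdEncoding.EncodeToString(sum[:])

	// Strict() mirrors the check above: it refuses encodings whose trailing
	// padding bits are non-zero, which the lenient decoder would accept.
	decoded, err := base64.StdEncoding.Strict().DecodeString(header)
	if err != nil || len(decoded) == 0 {
		fmt.Println("ErrInvalidDigest")
		return
	}
	fmt.Printf("content-md5 ok: %x\n", decoded)
}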
@@ -44,7 +44,7 @@ func TestGetRequestAuthType(t *testing.T) {
			URL: &url.URL{
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
			},
			Header: http.Header{
				"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
@@ -62,7 +62,7 @@ func TestGetRequestAuthType(t *testing.T) {
			URL: &url.URL{
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
			},
			Header: http.Header{
				"Authorization": []string{"Bearer 12313123"},
@@ -77,7 +77,7 @@ func TestGetRequestAuthType(t *testing.T) {
			URL: &url.URL{
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
			},
			Header: http.Header{
				"Authorization": []string{""},
@@ -92,7 +92,7 @@ func TestGetRequestAuthType(t *testing.T) {
			URL: &url.URL{
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
				RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
			},
		},
@@ -105,7 +105,7 @@ func TestGetRequestAuthType(t *testing.T) {
			URL: &url.URL{
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
			},
			Header: http.Header{
				"Content-Type": []string{"multipart/form-data"},
cmd/background-heal-ops.go (new file, 160 lines)
@@ -0,0 +1,160 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/madmin"
)

// healTask represents what to heal along with options
//   path: '/'                     => Heal disk formats along with metadata
//   path: 'bucket/' or '/bucket/' => Heal bucket
//   path: 'bucket/object'         => Heal object
type healTask struct {
	path string
	opts madmin.HealOpts
	// Healing response will be sent here
	responseCh chan healResult
}

// healResult represents a healing result with a possible error
type healResult struct {
	result madmin.HealResultItem
	err    error
}

// healRoutine receives heal tasks, to heal buckets, objects and format.json
type healRoutine struct {
	tasks  chan healTask
	doneCh chan struct{}
}

// Add a new task in the tasks queue
func (h *healRoutine) queueHealTask(task healTask) {
	h.tasks <- task
}

// Wait for heal requests and process them
func (h *healRoutine) run() {
	ctx := context.Background()
	for {
		select {
		case task, ok := <-h.tasks:
			if !ok {
				break
			}
			if globalHTTPServer != nil {
				// Wait at max 10 minute for an inprogress request before proceeding to heal
				waitCount := 600
				// Any requests in progress, delay the heal.
				for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
					waitCount > 0 {
					waitCount--
					time.Sleep(1 * time.Second)
				}
			}

			var res madmin.HealResultItem
			var err error
			bucket, object := urlPath2BucketObjectName(task.path)
			switch {
			case bucket == "" && object == "":
				res, err = bgHealDiskFormat(ctx, task.opts)
			case bucket != "" && object == "":
				res, err = bgHealBucket(ctx, bucket, task.opts)
			case bucket != "" && object != "":
				res, err = bgHealObject(ctx, bucket, object, task.opts)
			}
			task.responseCh <- healResult{result: res, err: err}
		case <-h.doneCh:
			return
		case <-GlobalServiceDoneCh:
			return
		}
	}
}

func initHealRoutine() *healRoutine {
	return &healRoutine{
		tasks:  make(chan healTask),
		doneCh: make(chan struct{}),
	}
}

func initBackgroundHealing() {
	healBg := initHealRoutine()
	go healBg.run()

	globalBackgroundHealing = healBg
}

// bgHealDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return madmin.HealResultItem{}, errServerNotInitialized
	}

	res, err := objectAPI.HealFormat(ctx, opts.DryRun)

	// Return any error, but ignore the error returned when the disks have
	// already been healed.
	if err != nil && err != errNoHealRequired {
		return madmin.HealResultItem{}, err
	}

	// Healing succeeded, notify the peers to reload format and re-initialize disks.
	// We will not notify peers if healing is not required.
	if err == nil {
		for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
			if nerr.Err != nil {
				logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(ctx, nerr.Err)
			}
		}
	}

	return res, nil
}

// bgHealBucket - traverses and heals the given bucket
func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return madmin.HealResultItem{}, errServerNotInitialized
	}

	return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
}

// bgHealObject - heal the given object and record the result
func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return madmin.HealResultItem{}, errServerNotInitialized
	}
	return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
}
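The new heal routine is a single worker goroutine fed by a task channel, where each task carries its own response channel and a done channel stops the loop. A trimmed, self-contained sketch of that pattern, with the heal dispatch stubbed out:

package main

import (
	"fmt"
	"time"
)

type task struct {
	path       string
	responseCh chan error
}

type worker struct {
	tasks  chan task
	doneCh chan struct{}
}

func (w *worker) run() {
	for {
		select {
		case t, ok := <-w.tasks:
			if !ok {
				return
			}
			// Stand-in for dispatching on format/bucket/object.
			t.responseCh <- fmt.Errorf("healed %s", t.path)
		case <-w.doneCh:
			return
		}
	}
}

func main() {
	w := &worker{tasks: make(chan task), doneCh: make(chan struct{})}
	go w.run()

	resp := make(chan error)
	w.tasks <- task{path: "bucket/object", responseCh: resp}
	fmt.Println(<-resp)

	close(w.doneCh)
	time.Sleep(10 * time.Millisecond) // give the goroutine time to exit in this sketch
}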
@@ -32,33 +32,20 @@ type streamingBitrotWriter struct {
	h         hash.Hash
	shardSize int64
	canClose  chan struct{} // Needed to avoid race explained in Close() call.
-
-	// Following two fields are used only to make sure that Write(p) is called such that
-	// len(p) is always the block size except the last block, i.e prevent programmer errors.
-	currentBlockIdx int
-	verifyTillIdx   int
}

func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
-	if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize {
-		// All blocks except last should be of the length b.shardSize
-		logger.LogIf(context.Background(), errUnexpected)
-		return 0, errUnexpected
-	}
	if len(p) == 0 {
		return 0, nil
	}
	b.h.Reset()
	b.h.Write(p)
	hashBytes := b.h.Sum(nil)
-	n, err := b.iow.Write(hashBytes)
-	if n != len(hashBytes) {
-		logger.LogIf(context.Background(), err)
+	_, err := b.iow.Write(hashBytes)
+	if err != nil {
		return 0, err
	}
-	n, err = b.iow.Write(p)
-	b.currentBlockIdx++
-	return n, err
+	return b.iow.Write(p)
}

func (b *streamingBitrotWriter) Close() error {
@@ -78,16 +65,14 @@ func (b *streamingBitrotWriter) Close() error {
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
	r, w := io.Pipe()
	h := algo.New()
-	bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)}
+	bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
	go func() {
-		bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
-		totalFileSize := bitrotSumsTotalSize + length
-		err := disk.CreateFile(volume, filePath, totalFileSize, r)
-		if err != nil {
-			reqInfo := (&logger.ReqInfo{}).AppendTags("storageDisk", disk.String())
-			ctx := logger.SetReqInfo(context.Background(), reqInfo)
-			logger.LogIf(ctx, err)
+		totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
+		if length != -1 {
+			bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
+			totalFileSize = bitrotSumsTotalSize + length
		}
+		err := disk.CreateFile(volume, filePath, totalFileSize, r)
		r.CloseWithError(err)
		close(bw.canClose)
	}()
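The streaming writer interleaves a checksum before every shard, so the reader can verify shard by shard as it goes. A self-contained sketch of that hash-then-data layout; SHA-256 stands in here for the HighwayHash algorithm the real code uses:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// writeShards emits checksum-then-shard for each shard-sized block.
func writeShards(dst *bytes.Buffer, data []byte, shardSize int) {
	for off := 0; off < len(data); off += shardSize {
		end := off + shardSize
		if end > len(data) {
			end = len(data)
		}
		sum := sha256.Sum256(data[off:end])
		dst.Write(sum[:])        // checksum first...
		dst.Write(data[off:end]) // ...then the shard itself
	}
}

// verifyShards re-reads the stream and checks each shard against its
// prepended checksum, as the streaming reader above does.
func verifyShards(src *bytes.Buffer, total, shardSize int) error {
	for read := 0; read < total; {
		n := shardSize
		if total-read < n {
			n = total - read
		}
		sum := append([]byte(nil), src.Next(sha256.Size)...)
		shard := src.Next(n)
		if got := sha256.Sum256(shard); !bytes.Equal(got[:], sum) {
			return fmt.Errorf("bitrot detected at offset %d", read)
		}
		read += n
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	data := []byte("0123456789abcdef0123")
	writeShards(&buf, data, 8)
	fmt.Println(verifyShards(&buf, len(data), 8)) // <nil>
}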
@@ -118,7 +103,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
	var err error
	if offset%b.shardSize != 0 {
		// Offset should always be aligned to b.shardSize
-		logger.LogIf(context.Background(), errUnexpected)
+		// Can never happen unless there are programmer bugs
		return 0, errUnexpected
	}
	if b.rc == nil {
@@ -127,31 +112,26 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
		streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
		b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
		if err != nil {
-			reqInfo := (&logger.ReqInfo{}).AppendTags("storageDisk", b.disk.String())
-			ctx := logger.SetReqInfo(context.Background(), reqInfo)
-			logger.LogIf(ctx, err)
			return 0, err
		}
	}
	if offset != b.currOffset {
-		logger.LogIf(context.Background(), errUnexpected)
+		// Can never happen unless there are programmer bugs
		return 0, errUnexpected
	}
	b.h.Reset()
	_, err = io.ReadFull(b.rc, b.hashBytes)
	if err != nil {
-		logger.LogIf(context.Background(), err)
		return 0, err
	}
	_, err = io.ReadFull(b.rc, buf)
	if err != nil {
-		logger.LogIf(context.Background(), err)
		return 0, err
	}
	b.h.Write(buf)

	if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
-		err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
+		err = HashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
		logger.LogIf(context.Background(), err)
		return 0, err
	}
@@ -31,19 +31,9 @@ type wholeBitrotWriter struct {
	filePath  string
	shardSize int64 // This is the shard size of the erasure logic
	hash.Hash       // For bitrot hash
-
-	// Following two fields are used only to make sure that Write(p) is called such that
-	// len(p) is always the block size except the last block and prevent programmer errors.
-	currentBlockIdx int
-	lastBlockIdx    int
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
-	if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize {
-		// All blocks except last should be of the length b.shardSize
-		logger.LogIf(context.Background(), errUnexpected)
-		return 0, errUnexpected
-	}
	err := b.disk.AppendFile(b.volume, b.filePath, p)
	if err != nil {
		logger.LogIf(context.Background(), err)
@@ -54,7 +44,6 @@ func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
		logger.LogIf(context.Background(), err)
		return 0, err
	}
-	b.currentBlockIdx++
	return len(p), nil
}

@@ -63,8 +52,8 @@ func (b *wholeBitrotWriter) Close() error {
}

// Returns whole-file bitrot writer.
-func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
-	return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)}
+func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
+	return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New()}
}

// Implementation to verify bitrot for the whole file.
@@ -120,7 +120,7 @@ func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, alg
	if algo == HighwayHash256S {
		return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
	}
-	return newWholeBitrotWriter(disk, volume, filePath, length, algo, shardSize)
+	return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}

func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
@@ -155,33 +155,3 @@ func bitrotWriterSum(w io.Writer) []byte {
	}
	return nil
}
-
-// Verify if a file has bitrot error.
-func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
-	if algo != HighwayHash256S {
-		buf := []byte{}
-		// For whole-file bitrot we don't need to read the entire file as the bitrot verify happens on the server side even if we read 0-bytes.
-		_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
-		return err
-	}
-	buf := make([]byte, shardSize)
-	r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
-	defer closeBitrotReaders([]io.ReaderAt{r})
-	var offset int64
-	for {
-		if offset == tillOffset {
-			break
-		}
-		var n int
-		tmpBuf := buf
-		if int64(len(tmpBuf)) > (tillOffset - offset) {
-			tmpBuf = tmpBuf[:(tillOffset - offset)]
-		}
-		n, err = r.ReadAt(tmpBuf, offset)
-		if err != nil {
-			return err
-		}
-		offset += int64(n)
-	}
-	return nil
-}
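ceilFrac drives the on-disk size accounting in the streaming writer: one checksum per shard-sized block, stored inline with the data. A small sketch of that arithmetic, with ceilFrac defined locally as a stand-in for the repository helper of the same name:

package main

import "fmt"

// ceilFrac: ceiling of numerator/denominator for positive inputs.
func ceilFrac(numerator, denominator int64) int64 {
	if denominator == 0 {
		return 0
	}
	return (numerator + denominator - 1) / denominator
}

func main() {
	const (
		length    int64 = 10 << 20 // 10 MiB of shard data
		shardSize int64 = 1 << 20  // 1 MiB blocks
		hashSize  int64 = 32       // a 256-bit checksum is 32 bytes
	)
	// Same accounting as newStreamingBitrotWriter above: one checksum per
	// block, so the created file holds data plus all checksums.
	bitrotSumsTotalSize := ceilFrac(length, shardSize) * hashSize
	fmt.Println("total file size:", bitrotSumsTotalSize+length)
}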
@@ -46,16 +46,86 @@ func validateListObjectsArgs(prefix, marker, delimiter, encodingType string, max
		}
	}

	/// MinIO special conditions for ListObjects.

	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != "/" {
		return ErrNotImplemented
	}
	// Success.
	return ErrNone
}

+// ListBucketObjectVersions - GET Bucket Object versions
+// You can use the versions subresource to list metadata about all
+// of the versions of objects in a bucket.
+func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ListBucketObjectVersions")
+
+	defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r))
+
+	vars := mux.Vars(r)
+	bucket := vars["bucket"]
+
+	objectAPI := api.ObjectAPI()
+	if objectAPI == nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	urlValues := r.URL.Query()
+
+	// Extract all the listBucketVersions query params to their native values.
+	// versionIDMarker is ignored here.
+	prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues)
+	if errCode != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	// Validate the query params before beginning to serve the request.
+	if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxkeys); s3Error != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	listObjects := objectAPI.ListObjects
+
+	// Initiate a list objects operation based on the input params.
+	// On success would return back ListObjectsInfo object to be
+	// marshaled into S3 compatible XML header.
+	listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys)
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	for i := range listObjectsInfo.Objects {
+		var actualSize int64
+		if listObjectsInfo.Objects[i].IsCompressed() {
+			// Read the decompressed size from the meta.json.
+			actualSize = listObjectsInfo.Objects[i].GetActualSize()
+			if actualSize < 0 {
+				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize),
+					r.URL, guessIsBrowserReq(r))
+				return
+			}
+			// Set the info.Size to the actualSize.
+			listObjectsInfo.Objects[i].Size = actualSize
+		} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
+			listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
+			listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
+			if err != nil {
+				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+				return
+			}
+		}
+	}
+
+	response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo)
+
+	// Write success response.
+	writeSuccessResponseXML(w, encodeResponse(response))
+}
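The handler above is reached through the bucket's versions subresource. A sketch of the request URL a client would build for it; the host and parameter values here are placeholders:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("versions", "")
	q.Set("prefix", "photos/")
	q.Set("delimiter", "/")
	q.Set("max-keys", "1000")
	q.Set("encoding-type", "url")

	u := url.URL{
		Scheme:   "https",
		Host:     "play.min.io",
		Path:     "/mybucket",
		RawQuery: q.Encode(),
	}
	// version-id-marker would also be accepted on the wire, but the handler
	// ignores it since every object is reported with the "null" version ID.
	fmt.Println(u.String())
}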
// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
@@ -100,9 +170,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
	}

	listObjectsV2 := objectAPI.ListObjectsV2
-	if api.CacheAPI() != nil {
-		listObjectsV2 = api.CacheAPI().ListObjectsV2
-	}

	// Initiate a list objects operation based on the input params.
	// On success would return back ListObjectsInfo object to be
	// marshaled into S3 compatible XML header.
@@ -179,9 +247,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
	}

	listObjects := objectAPI.ListObjects
-	if api.CacheAPI() != nil {
-		listObjects = api.CacheAPI().ListObjects
-	}

	// Initiate a list objects operation based on the input params.
	// On success would return back ListObjectsInfo object to be
@@ -22,6 +22,7 @@ import (
	"encoding/xml"
	"fmt"
	"io"
+	"net"
	"net/http"
	"net/url"
	"path"
@@ -30,13 +31,15 @@ import (

	"github.com/gorilla/mux"

-	"github.com/minio/minio-go/pkg/set"
+	"github.com/minio/minio-go/v6/pkg/set"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/handlers"
	"github.com/minio/minio/pkg/hash"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/sync/errgroup"
)
@@ -49,15 +52,27 @@ import (
// -- If yes, check if the IP of entry matches local IP. This means entry is for this instance.
// -- If IP of the entry doesn't match, this means entry is for another instance. Log an error to console.
func initFederatorBackend(objLayer ObjectLayer) {
+	// Get buckets in the backend
	b, err := objLayer.ListBuckets(context.Background())
	if err != nil {
		logger.LogIf(context.Background(), err)
		return
	}

+	// Get buckets in the DNS
+	dnsBuckets, err := globalDNSConfig.List()
+	if err != nil && err != dns.ErrNoEntriesFound {
+		logger.LogIf(context.Background(), err)
+		return
+	}
+
+	bucketSet := set.NewStringSet()
+
+	// Add buckets that are not registered with the DNS
	g := errgroup.WithNErrs(len(b))
	for index := range b {
		index := index
+		bucketSet.Add(b[index].Name)
		g.Go(func() error {
			r, gerr := globalDNSConfig.Get(b[index].Name)
			if gerr != nil {
@@ -77,7 +92,38 @@ func initFederatorBackend(objLayer ObjectLayer) {
	for _, err := range g.Wait() {
		if err != nil {
			logger.LogIf(context.Background(), err)
			return
		}
	}

+	g = errgroup.WithNErrs(len(dnsBuckets))
+	// Remove buckets that are in DNS for this server, but aren't local
+	for index := range dnsBuckets {
+		index := index
+
+		g.Go(func() error {
+			// This is a local bucket that exists, so we can continue
+			if bucketSet.Contains(dnsBuckets[index].Key) {
+				return nil
+			}
+
+			// This is not for our server, so we can continue
+			hostPort := net.JoinHostPort(dnsBuckets[index].Host, fmt.Sprintf("%d", dnsBuckets[index].Port))
+			if globalDomainIPs.Intersection(set.CreateStringSet(hostPort)).IsEmpty() {
+				return nil
+			}
+
+			// We got here, so we know the bucket no longer exists, but is registered in DNS to this server
+			if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
+				return fmt.Errorf("Failed to remove DNS entry for %s due to %v", dnsBuckets[index].Key, err)
+			}
+
+			return nil
+		}, index)
+	}
+
+	for _, err := range g.Wait() {
+		if err != nil {
+			logger.LogIf(context.Background(), err)
+		}
+	}
}
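initFederatorBackend now reconciles in both directions: local buckets missing from DNS get added, and DNS records that point at this server but have no matching local bucket get removed. A map-based sketch of that reconciliation logic; the types and names are illustrative stand-ins, not the repository's:

package main

import (
	"fmt"
	"net"
)

type dnsRecord struct {
	Key  string
	Host string
	Port int
}

func reconcile(local []string, dns []dnsRecord, myHostPorts map[string]bool) (add []string, del []dnsRecord) {
	localSet := make(map[string]bool, len(local))
	for _, b := range local {
		localSet[b] = true
	}
	inDNS := make(map[string]bool, len(dns))
	for _, rec := range dns {
		inDNS[rec.Key] = true
		hostPort := net.JoinHostPort(rec.Host, fmt.Sprintf("%d", rec.Port))
		// Only remove entries owned by this server, like the globalDomainIPs
		// intersection check above.
		if !localSet[rec.Key] && myHostPorts[hostPort] {
			del = append(del, rec)
		}
	}
	for _, b := range local {
		if !inDNS[b] {
			add = append(add, b)
		}
	}
	return add, del
}

func main() {
	add, del := reconcile(
		[]string{"photos", "logs"},
		[]dnsRecord{{Key: "logs", Host: "10.0.0.1", Port: 9000}, {Key: "stale", Host: "10.0.0.1", Port: 9000}},
		map[string]bool{"10.0.0.1:9000": true},
	)
	fmt.Println(add, del) // [photos] [{stale 10.0.0.1 9000}]
}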
@@ -105,9 +151,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
	}

	getBucketInfo := objectAPI.GetBucketInfo
-	if api.CacheAPI() != nil {
-		getBucketInfo = api.CacheAPI().GetBucketInfo
-	}

	if _, err := getBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
@@ -202,11 +246,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
	}

	listBuckets := objectAPI.ListBuckets
-	if api.CacheAPI() != nil {
-		listBuckets = api.CacheAPI().ListBuckets
-	}

-	if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
+	accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
+	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}
@@ -240,8 +282,28 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
		}
	}

+	// Set prefix value for "s3:prefix" policy conditionals.
+	r.Header.Set("prefix", "")
+
+	// Set delimiter value for "s3:delimiter" policy conditionals.
+	r.Header.Set("delimiter", SlashSeparator)
+
+	var newBucketsInfo []BucketInfo
+	for _, bucketInfo := range bucketsInfo {
+		if globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     accessKey,
+			Action:          iampolicy.ListBucketAction,
+			BucketName:      bucketInfo.Name,
+			ConditionValues: getConditionValues(r, "", accessKey),
+			IsOwner:         owner,
+			ObjectName:      "",
+		}) {
+			newBucketsInfo = append(newBucketsInfo, bucketInfo)
+		}
+	}
+
	// Generate response.
-	response := generateListBucketsResponse(bucketsInfo)
+	response := generateListBucketsResponse(newBucketsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write response.
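The listing is now filtered per bucket through the IAM allow check before the response is generated. Reduced to its shape, the filter looks like this sketch, where isAllowed stands in for globalIAMSys.IsAllowed:

package main

import "fmt"

type bucketInfo struct{ Name string }

// filterBuckets keeps only the buckets the caller may list.
func filterBuckets(all []bucketInfo, isAllowed func(bucket string) bool) []bucketInfo {
	var out []bucketInfo
	for _, b := range all {
		if isAllowed(b.Name) {
			out = append(out, b)
		}
	}
	return out
}

func main() {
	buckets := []bucketInfo{{"public"}, {"private"}}
	visible := filterBuckets(buckets, func(b string) bool { return b == "public" })
	fmt.Println(visible) // [{public}]
}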
@@ -263,16 +325,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
		return
	}

-	var s3Error APIErrorCode
-	if s3Error = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, ""); s3Error != ErrNone {
-		// In the event access is denied, a 200 response should still be returned
-		// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
-		if s3Error != ErrAccessDenied {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
-
	// Content-Length is required and should be non-zero
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
@@ -319,42 +371,60 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
		return
	}

	deleteObject := objectAPI.DeleteObject
	deleteObjectsFn := objectAPI.DeleteObjects
	if api.CacheAPI() != nil {
		deleteObject = api.CacheAPI().DeleteObject
		deleteObjectsFn = api.CacheAPI().DeleteObjects
	}

	var dErrs = make([]error, len(deleteObjects.Objects))
	type delObj struct {
		origIndex int
		name      string
	}

	var objectsToDelete []delObj
	var dErrs = make([]APIErrorCode, len(deleteObjects.Objects))

	for index, object := range deleteObjects.Objects {
		// If the request is denied access, each item
		// should be marked as 'AccessDenied'
		if s3Error == ErrAccessDenied {
			dErrs[index] = PrefixAccessDenied{
				Bucket: bucket,
				Object: object.ObjectName,
		if dErrs[index] = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); dErrs[index] != ErrNone {
			if dErrs[index] == ErrSignatureDoesNotMatch || dErrs[index] == ErrInvalidAccessKeyID {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(dErrs[index]), r.URL, guessIsBrowserReq(r))
				return
			}
			continue
		}
		dErrs[index] = deleteObject(ctx, bucket, object.ObjectName)

		objectsToDelete = append(objectsToDelete, delObj{index, object.ObjectName})
	}

	toNames := func(input []delObj) (output []string) {
		output = make([]string, len(input))
		for i := range input {
			output[i] = input[i].name
		}
		return
	}

	errs, err := deleteObjectsFn(ctx, bucket, toNames(objectsToDelete))
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	for i, obj := range objectsToDelete {
		dErrs[obj.origIndex] = toAPIErrorCode(ctx, errs[i])
	}

	// Collect deleted objects and errors if any.
	var deletedObjects []ObjectIdentifier
	var deleteErrors []DeleteError
	for index, err := range dErrs {
	for index, errCode := range dErrs {
		object := deleteObjects.Objects[index]
		// Successfully deleted objects are collected separately.
		if err == nil {
		if errCode == ErrNone || errCode == ErrNoSuchKey {
			deletedObjects = append(deletedObjects, object)
			continue
		}
		if _, ok := err.(ObjectNotFound); ok {
			// If the object is not found it should be
			// accounted as deleted as per S3 spec.
			deletedObjects = append(deletedObjects, object)
			continue
		}
		apiErr := toAPIError(ctx, err)
		apiErr := getAPIError(errCode)
		// Error during delete should be collected separately.
		deleteErrors = append(deleteErrors, DeleteError{
			Code: apiErr.Code,
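The rework above replaces one DeleteObject round trip per key with a single batched DeleteObjects call, and delObj.origIndex keeps every per-object result aligned with the position the client used in its request even after denied entries are skipped. A compact sketch of that index-preserving batch pattern, assuming a hypothetical batch function that returns one error per submitted name:

package sketch

// batchWithOrigIndex filters a request, issues one batched call, and maps
// per-item errors back to the original request positions, mirroring the
// objectsToDelete/origIndex bookkeeping in the handler above.
func batchWithOrigIndex(names []string, skip func(string) bool,
	batch func([]string) []error) []error {

	type item struct {
		origIndex int
		name      string
	}

	results := make([]error, len(names))
	var todo []item
	for i, name := range names {
		if skip(name) { // e.g. the per-object auth check failed
			continue
		}
		todo = append(todo, item{i, name})
	}

	batched := make([]string, len(todo))
	for i := range todo {
		batched[i] = todo[i].name
	}

	// One round trip for the whole batch; errs[i] corresponds to todo[i].
	errs := batch(batched)
	for i, it := range todo {
		results[it.origIndex] = errs[i]
	}
	return results
}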
@@ -437,7 +507,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
	}

	// Make sure to add Location information here only for bucket
	w.Header().Set("Location", getObjectLocation(r, globalDomainNames, bucket, ""))
	w.Header().Set(xhttp.Location,
		getObjectLocation(r, globalDomainNames, bucket, ""))

	writeSuccessResponseHeadersOnly(w)
	return
@@ -458,7 +529,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
	}

	// Make sure to add Location information here only for bucket
	w.Header().Set("Location", path.Clean(r.URL.Path)) // Clean any trailing slashes.
	w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.

	writeSuccessResponseHeadersOnly(w)
}
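This hunk, like the ETag and Location changes further down, swaps bare header-name strings for shared constants from cmd/http (imported as xhttp), so a misspelled header becomes a compile error instead of a silent protocol bug. A tiny sketch of the convention; the constant values below are simply the standard header names and are assumptions about that package, not copied from it.

package sketch

import "net/http"

// Assumed stand-ins for the xhttp constants referenced in the diff.
const (
	xhttpLocation = "Location"
	xhttpETag     = "ETag"
)

func setLocation(w http.ResponseWriter, loc string) {
	// No stringly-typed header name at the call site.
	w.Header().Set(xhttpLocation, loc)
}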
@@ -490,6 +561,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

	bucket := mux.Vars(r)["bucket"]

	// To detect if the client has disconnected.
	r.Body = &detectDisconnect{r.Body, r.Context().Done()}

	// Require Content-Length to be set in the request
	size := r.ContentLength
	if size < 0 {
@@ -524,7 +598,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}

	// Remove all tmp files creating during multipart upload
	// Remove all tmp files created during multipart upload
	defer form.RemoveAll()

	// Extract all form fields
@@ -615,7 +689,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}

	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -637,7 +711,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}
	if objectAPI.IsEncryptionSupported() {
		if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
		if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE-C and SSE-S3 requests
			var reader io.Reader
			var key []byte
			if crypto.SSEC.IsRequested(formValues) {
@@ -653,7 +727,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
				return
			}
			info := ObjectInfo{Size: fileSize}
			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
			// do not try to verify encrypted content
			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
@@ -669,8 +744,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
	}

	location := getObjectLocation(r, globalDomainNames, bucket, object)
	w.Header()["ETag"] = []string{`"` + objInfo.ETag + `"`}
	w.Header().Set("Location", location)
	w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
	w.Header().Set(xhttp.Location, location)

	// Notify object created event.
	defer sendEvent(eventArgs{
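Every hash.NewReader call in this patch gains a trailing boolean that threads the new --compat CLI flag (globalCLIContext.StrictS3Compat, set in handleCommonCmdArgs further down) into content verification. A sketch of the idea follows: a size-verifying reader with a strictness switch. It is my own simplification for illustration, not the pkg/hash implementation.

package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// verifyReader is a simplified stand-in for pkg/hash's Reader: it counts
// bytes against the declared size, and the strict flag decides whether a
// mismatch is a hard failure.
type verifyReader struct {
	r      io.Reader
	size   int64 // declared size
	read   int64
	strict bool
}

func (v *verifyReader) Read(p []byte) (int, error) {
	n, err := v.r.Read(p)
	v.read += int64(n)
	if err == io.EOF && v.read != v.size {
		if v.strict {
			return n, errors.New("size mismatch under strict S3 compatibility")
		}
		// Relaxed mode tolerates the mismatch.
	}
	return n, err
}

func main() {
	r := &verifyReader{r: strings.NewReader("hello"), size: 4, strict: true}
	if _, err := ioutil.ReadAll(r); err != nil {
		fmt.Println(err) // size mismatch under strict S3 compatibility
	}
}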
@@ -733,9 +808,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
	}

	getBucketInfo := objectAPI.GetBucketInfo
	if api.CacheAPI() != nil {
		getBucketInfo = api.CacheAPI().GetBucketInfo
	}

	if _, err := getBucketInfo(ctx, bucket); err != nil {
		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
		return
@@ -765,9 +838,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
	}

	deleteBucket := objectAPI.DeleteBucket
	if api.CacheAPI() != nil {
		deleteBucket = api.CacheAPI().DeleteBucket
	}

	// Attempt to delete bucket.
	if err := deleteBucket(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -786,6 +857,8 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
	globalNotificationSys.RemoveNotification(bucket)
	globalPolicySys.Remove(bucket)
	globalNotificationSys.DeleteBucket(ctx, bucket)
	globalLifecycleSys.Remove(bucket)
	globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)

	// Write success response.
	writeSuccessNoContent(w)

@@ -69,7 +69,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
			expectedRespStatus: http.StatusForbidden,
			locationResponse:   []byte(""),
			errorResponse: APIErrorResponse{
				Resource: "/" + bucketName + "/",
				Resource: SlashSeparator + bucketName + SlashSeparator,
				Code:     "InvalidAccessKeyId",
				Message:  "The access key ID you provided does not exist in our records.",
			},
@@ -394,7 +394,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
			prefix:         "",
			keyMarker:      "",
			uploadIDMarker: "",
			delimiter:      "/",
			delimiter:      SlashSeparator,
			maxUploads:     "100",
			accessKey:      credentials.AccessKey,
			secretKey:      credentials.SecretKey,
@@ -762,7 +762,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
		apiRouter.ServeHTTP(rec, req)
		// Assert the response code with the expected status.
		if rec.Code != testCase.expectedRespStatus {
			t.Errorf("Case %d: MinIO %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
			t.Errorf("Test %d: MinIO %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
		}

		// read the response body.
165	cmd/bucket-lifecycle-handler.go	Normal file
@@ -0,0 +1,165 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/xml"
	"io"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/lifecycle"
	"github.com/minio/minio/pkg/policy"
)

const (
	// Lifecycle configuration file.
	bucketLifecycleConfig = "lifecycle.xml"
)

// PutBucketLifecycleHandler - This HTTP handler stores given bucket lifecycle configuration as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketLifecycle")

	defer logger.AuditLog(w, r, "PutBucketLifecycle", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// PutBucketLifecycle always needs a Content-Md5
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = objAPI.SetBucketLifecycle(ctx, bucket, bucketLifecycle); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalLifecycleSys.Set(bucket, *bucketLifecycle)
	globalNotificationSys.SetBucketLifecycle(ctx, bucket, bucketLifecycle)

	// Success.
	writeSuccessNoContent(w)
}

// GetBucketLifecycleHandler - This HTTP handler returns bucket lifecycle configuration.
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketLifecycle")

	defer logger.AuditLog(w, r, "GetBucketLifecycle", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read the bucket lifecycle configuration.
	bucketLifecycle, err := objAPI.GetBucketLifecycle(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	lifecycleData, err := xml.Marshal(bucketLifecycle)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write lifecycle configuration to client.
	writeSuccessResponseXML(w, lifecycleData)
}

// DeleteBucketLifecycleHandler - This HTTP handler removes bucket lifecycle configuration.
func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucketLifecycle")

	defer logger.AuditLog(w, r, "DeleteBucketLifecycle", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err := objAPI.DeleteBucketLifecycle(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalLifecycleSys.Remove(bucket)
	globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)

	// Success.
	writeSuccessNoContent(w)
}
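PutBucketLifecycleHandler above parses an AWS-style lifecycle XML document from the request body and persists it as lifecycle.xml. For illustration, here is a sketch of the kind of minimal configuration a client might PUT; the rule ID, prefix, and expiry period are made-up example values, not taken from this patch.

package main

import "fmt"

// exampleLifecycle is a hypothetical document of the shape the handler
// hands to lifecycle.ParseLifecycleConfig; all values are illustrative.
const exampleLifecycle = `<LifecycleConfiguration>
  <Rule>
    <ID>expire-temp-objects</ID>
    <Filter><Prefix>tmp/</Prefix></Filter>
    <Status>Enabled</Status>
    <Expiration><Days>7</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`

func main() {
	// The handler reads a body like this (bounded by io.LimitReader to
	// Content-Length), parses it, then stores it for the bucket.
	fmt.Println(exampleLifecycle)
}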
@@ -17,12 +17,15 @@
package cmd

import (
	"bytes"
	"encoding/xml"
	"errors"
	"io"
	"net/http"
	"path"

	"github.com/gorilla/mux"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
@@ -48,6 +51,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,

	vars := mux.Vars(r)
	bucketName := vars["bucket"]
	var config *event.Config

	objAPI := api.ObjectAPI()
	if objAPI == nil {
@@ -71,24 +75,31 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
		return
	}

	// Attempt to successfully load notification config.
	nConfig, err := readNotificationConfig(ctx, objAPI, bucketName)
	// Construct path to notification.xml for the given bucket.
	configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)

	configData, err := readConfig(ctx, objAPI, configFile)
	if err != nil {
		// Ignore errNoSuchNotifications to comply with AWS S3.
		if err != errNoSuchNotifications {
		if err != errConfigNotFound {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		config = &event.Config{}
	} else {
		if err = xml.NewDecoder(bytes.NewReader(configData)).Decode(&config); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		nConfig = &event.Config{}
	}

	config.SetRegion(globalServerConfig.GetRegion())

	// If xml namespace is empty, set a default value before returning.
	if nConfig.XMLNS == "" {
		nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
	if config.XMLNS == "" {
		config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
	}

	notificationBytes, err := xml.Marshal(nConfig)
	notificationBytes, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
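GetBucketNotificationHandler now reads notification.xml through the generic readConfig helper and maps errConfigNotFound to an empty event.Config, so a bucket with no configured notifications returns an empty document instead of an error, matching AWS S3. A distilled sketch of that read-or-default pattern follows; Config and the hooks are hypothetical stand-ins, not the real types.

package sketch

import "errors"

// Config stands in for event.Config; errNotFound for errConfigNotFound.
type Config struct{ XMLNS string }

var errNotFound = errors.New("config not found")

// loadOrDefault returns the decoded config, or a fresh zero value when
// the backing object does not exist. Real failures still surface.
func loadOrDefault(read func() ([]byte, error),
	decode func([]byte) (*Config, error)) (*Config, error) {
	data, err := read()
	if err != nil {
		if err == errNotFound {
			return &Config{}, nil // absent config means "no notifications"
		}
		return nil, err
	}
	return decode(data)
}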
@@ -142,9 +153,10 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
		if event.IsEventError(err) {
			apiErr = toAPIError(ctx, err)
		}

		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
		if _, ok := err.(*event.ErrARNNotFound); !ok {
			writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
@@ -246,6 +258,8 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
		return
	}

	w.Header().Set(xhttp.ContentType, "text/event-stream")

	target, err := target.NewHTTPClientTarget(*host, w)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))

@@ -71,7 +71,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht

	// Error out if Content-Length is beyond allowed size.
	if r.ContentLength > maxBucketPolicySize {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPolicyTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -87,7 +87,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
	// Load all custom CA files.
	for _, fi := range fis {
		// Skip all directories.
		if hasSuffix(fi, slashSeparator) {
		if hasSuffix(fi, SlashSeparator) {
			continue
		}
		caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))

@@ -29,7 +29,7 @@ import (
	etcd "github.com/coreos/etcd/clientv3"
	dns2 "github.com/miekg/dns"
	"github.com/minio/cli"
	"github.com/minio/minio-go/pkg/set"
	"github.com/minio/minio-go/v6/pkg/set"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/target/console"
	"github.com/minio/minio/cmd/logger/target/http"
@@ -38,10 +38,32 @@ import (
	xnet "github.com/minio/minio/pkg/net"
)

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
	if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
		logger.Fatal(errInvalidArgument,
			"Encryption support is requested but '%s' does not support encryption", name)
	}

	if strings.HasPrefix(name, "gateway") {
		if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
			uiErr := uiErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")
			logger.Fatal(uiErr, "Unable to start gateway with SSE")
		}
	}

	if globalIsCompressionEnabled && !objAPI.IsCompressionSupported() {
		logger.Fatal(errInvalidArgument,
			"Compression support is requested but '%s' does not support compression", name)
	}
}

// Check for updates and print a notification message
func checkUpdate(mode string) {
	// It's OK to ignore any errors during doUpdate() here.
	if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
		if updateMsg == "" {
			return
		}
		if globalInplaceUpdateDisabled {
			logger.StartupMessage(updateMsg)
		} else {
@@ -52,21 +74,23 @@ func checkUpdate(mode string) {

// Load logger targets based on user's configuration
func loadLoggers() {
	loggerUserAgent := getUserAgent(getMinioMode())

	auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
	if ok {
		// Enable audit HTTP logging through ENV.
		logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
		logger.AddAuditTarget(http.New(auditEndpoint, loggerUserAgent, NewCustomHTTPTransport()))
	}

	loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
	if ok {
		// Enable HTTP logging through ENV.
		logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
		logger.AddTarget(http.New(loggerEndpoint, loggerUserAgent, NewCustomHTTPTransport()))
	} else {
		for _, l := range globalServerConfig.Logger.HTTP {
			if l.Enabled {
				// Enable http logging
				logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
				logger.AddTarget(http.New(l.Endpoint, loggerUserAgent, NewCustomHTTPTransport()))
			}
		}
	}
@@ -161,6 +185,9 @@ func handleCommonCmdArgs(ctx *cli.Context) {
	globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}

	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())

	// Check "compat" flag from command line argument.
	globalCLIContext.StrictS3Compat = ctx.IsSet("compat") || ctx.GlobalIsSet("compat")
}

// Parses the given compression exclude list `extensions` or `content-types`.
@@ -208,13 +235,6 @@ func handleCommonEnvVars() {
		globalIsBrowserEnabled = bool(browserFlag)
	}

	traceFile := os.Getenv("MINIO_HTTP_TRACE")
	if traceFile != "" {
		var err error
		globalHTTPTraceFile, err = os.OpenFile(traceFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
		logger.FatalIf(err, "error opening file %s", traceFile)
	}

	etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
	if ok {
		etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")

@@ -20,9 +20,7 @@ import (
	"bytes"
	"context"
	"errors"
	"fmt"

	etcd "github.com/coreos/etcd/clientv3"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
)
@@ -51,29 +49,12 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
	return buffer.Bytes(), nil
}

func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
	_, err := client.Delete(ctx, configFile)
	return err
}

func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}

func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	_, err := client.Put(timeoutCtx, configFile, string(data))
	if err == context.DeadlineExceeded {
		return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
	} else if err != nil {
		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	return nil
}

func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
	if err != nil {
		return err
	}
@@ -82,61 +63,7 @@ func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data
	return err
}

func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	resp, err := client.Get(timeoutCtx, configFile)
	if err != nil {
		if err == context.DeadlineExceeded {
			return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
		}
		return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	if resp.Count == 0 {
		return nil, errConfigNotFound
	}
	for _, ev := range resp.Kvs {
		if string(ev.Key) == configFile {
			return ev.Value, nil
		}
	}
	return nil, errConfigNotFound
}

// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
	defer cancel()
	for watchResp := range globalEtcdClient.Watch(ctx, configFile) {
		for _, event := range watchResp.Events {
			if event.IsModify() || event.IsCreate() {
				loadCfgFn(objAPI)
			}
		}
	}
}

func checkConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	resp, err := client.Get(timeoutCtx, configFile)
	if err != nil {
		if err == context.DeadlineExceeded {
			return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
		}
		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	if resp.Count == 0 {
		return errConfigNotFound
	}
	return nil
}

func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	if globalEtcdClient != nil {
		return checkConfigEtcd(ctx, globalEtcdClient, configFile)
	}

	if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {

@@ -30,7 +30,7 @@ import (
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/iam/policy"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/iam/validator"
	xnet "github.com/minio/minio/pkg/net"
)
@@ -281,17 +281,27 @@ func (s *serverConfig) loadFromEnvs() {
	}

	if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
		if u, err := xnet.ParseURL(jwksURL); err == nil {
			s.OpenID.JWKS.URL = u
			logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
		u, err := xnet.ParseURL(jwksURL)
		if err != nil {
			logger.FatalIf(err, "Unable to parse MINIO_IAM_JWKS_URL %s", jwksURL)
		}
		s.OpenID.JWKS.URL = u
	}

	if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
		if u, err := xnet.ParseURL(opaURL); err == nil {
			s.Policy.OPA.URL = u
			s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
		u, err := xnet.ParseURL(opaURL)
		if err != nil {
			logger.FatalIf(err, "Unable to parse MINIO_IAM_OPA_URL %s", opaURL)
		}
		opaArgs := iampolicy.OpaArgs{
			URL:         u,
			AuthToken:   os.Getenv("MINIO_IAM_OPA_AUTHTOKEN"),
			Transport:   NewCustomHTTPTransport(),
			CloseRespFn: xhttp.DrainBody,
		}
		logger.FatalIf(opaArgs.Validate(), "Unable to reach MINIO_IAM_OPA_URL %s", opaURL)
		s.Policy.OPA.URL = opaArgs.URL
		s.Policy.OPA.AuthToken = opaArgs.AuthToken
	}
}

@@ -303,7 +313,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewAMQPTarget(k, v)
		t, err := target.NewAMQPTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("amqp(%s): %s", k, err.Error())
		}
@@ -314,7 +324,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewElasticsearchTarget(k, v)
		t, err := target.NewElasticsearchTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("elasticsearch(%s): %s", k, err.Error())
		}
@@ -325,7 +335,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewKafkaTarget(k, v)
		t, err := target.NewKafkaTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("kafka(%s): %s", k, err.Error())
		}
@@ -347,7 +357,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewMySQLTarget(k, v)
		t, err := target.NewMySQLTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("mysql(%s): %s", k, err.Error())
		}
@@ -358,7 +368,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewNATSTarget(k, v)
		t, err := target.NewNATSTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("nats(%s): %s", k, err.Error())
		}
@@ -369,7 +379,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewNSQTarget(k, v)
		t, err := target.NewNSQTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("nsq(%s): %s", k, err.Error())
		}
@@ -380,7 +390,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewPostgreSQLTarget(k, v)
		t, err := target.NewPostgreSQLTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
		}
@@ -391,7 +401,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
		t, err := target.NewRedisTarget(k, v)
		t, err := target.NewRedisTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("redis(%s): %s", k, err.Error())
		}
@@ -536,7 +546,7 @@ func (s *serverConfig) loadToCachedConfigs() {
		globalCacheMaxUse = cacheConf.MaxUse
	}
	if err := Environment.LookupKMSConfig(s.KMS); err != nil {
		logger.FatalIf(err, "Unable to setup the KMS")
		logger.FatalIf(err, "Unable to setup the KMS %s", s.KMS.Vault.Endpoint)
	}

	if !globalIsCompressionEnabled {
@@ -546,15 +556,22 @@ func (s *serverConfig) loadToCachedConfigs() {
		globalIsCompressionEnabled = compressionConf.Enabled
	}

	if s.OpenID.JWKS.URL != nil && s.OpenID.JWKS.URL.String() != "" {
		logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(),
			"Unable to populate public key from JWKS URL %s", s.OpenID.JWKS.URL)
	}

	globalIAMValidators = getAuthValidators(s)

	if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
		globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
		opaArgs := iampolicy.OpaArgs{
			URL:         s.Policy.OPA.URL,
			AuthToken:   s.Policy.OPA.AuthToken,
			Transport:   NewCustomHTTPTransport(),
			CloseRespFn: xhttp.DrainBody,
		})
	}
		}
		logger.FatalIf(opaArgs.Validate(), "Unable to reach OPA URL %s", s.Policy.OPA.URL)
		globalPolicyOPA = iampolicy.NewOpa(opaArgs)
	}
}

@@ -637,7 +654,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
	}
	for id, args := range config.Notify.AMQP {
		if args.Enable {
			newTarget, err := target.NewAMQPTarget(id, args)
			newTarget, err := target.NewAMQPTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -651,7 +668,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Elasticsearch {
		if args.Enable {
			newTarget, err := target.NewElasticsearchTarget(id, args)
			newTarget, err := target.NewElasticsearchTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -667,7 +684,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Kafka {
		if args.Enable {
			newTarget, err := target.NewKafkaTarget(id, args)
			newTarget, err := target.NewKafkaTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -696,7 +713,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.MySQL {
		if args.Enable {
			newTarget, err := target.NewMySQLTarget(id, args)
			newTarget, err := target.NewMySQLTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -710,7 +727,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.NATS {
		if args.Enable {
			newTarget, err := target.NewNATSTarget(id, args)
			newTarget, err := target.NewNATSTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -724,7 +741,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.NSQ {
		if args.Enable {
			newTarget, err := target.NewNSQTarget(id, args)
			newTarget, err := target.NewNSQTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -738,7 +755,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.PostgreSQL {
		if args.Enable {
			newTarget, err := target.NewPostgreSQLTarget(id, args)
			newTarget, err := target.NewPostgreSQLTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -752,7 +769,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Redis {
		if args.Enable {
			newTarget, err := target.NewRedisTarget(id, args)
			newTarget, err := target.NewRedisTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
@@ -767,7 +784,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
	for id, args := range config.Notify.Webhook {
		if args.Enable {
			args.RootCAs = globalRootCAs
			newTarget := target.NewWebhookTarget(id, args)
			newTarget := target.NewWebhookTarget(id, args, GlobalServiceDoneCh)
			if err := targetList.Add(newTarget); err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -185,10 +185,10 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},

		// Test 11 - Test AMQP
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false, "queueDir": "", "queueLimit": 0}}}}`, false},

		// Test 12 - Test NATS
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "queueDir": "", "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},

		// Test 13 - Test ElasticSearch
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
@@ -197,16 +197,16 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},

		// Test 15 - Test PostgreSQL
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},

		// Test 16 - Test Kafka
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false},

		// Test 17 - Test Webhook
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "", "queueDir": "", "queueLimit": 0} }}}`, false},

		// Test 18 - Test MySQL
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},

		// Test 19 - Test Format for MySQL
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
@@ -224,10 +224,10 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},

		// Test 24 - Test valid Format for ElasticSearch
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex", "queueDir": "", "queueLimit": 0 } }}}`, true},

		// Test 25 - Test Format for Redis
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1", "queueDir": "", "queueLimit": 0 } }}}`, false},

		// Test 26 - Test valid Format for Redis
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
@@ -236,7 +236,7 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": "", "queueLimit": 0}}}}`, false},

		// Test 28 - Test NSQ
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": "", "queueDir": "", "queueLimit": 0} }}}`, false},
	}

	for i, testCase := range testCases {

@@ -29,7 +29,7 @@ import (
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/iam/policy"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/iam/validator"
	xnet "github.com/minio/minio/pkg/net"
	"github.com/minio/minio/pkg/quick"
@@ -2413,6 +2413,7 @@ func migrateV27ToV28() error {
}

// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
// if etcd is configured then migrates /config/config.json to '<export_path>/.minio.sys/config/config.json'
func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
	// Construct path to config.json for the given bucket.
	configFile := path.Join(minioConfigPrefix, minioConfigFile)
@@ -2423,10 +2424,14 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
	}

	defer func() {
		// Rename config.json to config.json.deprecated only upon
		// success of this function.
		if err == nil {
			os.Rename(getConfigFile(), getConfigFile()+".deprecated")
			if globalEtcdClient != nil {
				deleteKeyEtcd(context.Background(), globalEtcdClient, configFile)
			} else {
				// Rename config.json to config.json.deprecated only upon
				// success of this function.
				os.Rename(getConfigFile(), getConfigFile()+".deprecated")
			}
		}
	}()

@@ -2435,7 +2440,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
	// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
	// and configFile, take a transaction lock to avoid data race between readConfig()
	// and saveConfig().
	objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigFile)
	if err = objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
@@ -2446,20 +2451,24 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
		return err
	} // if errConfigNotFound proceed to migrate..

	var configFiles = []string{
		getConfigFile(),
		getConfigFile() + ".deprecated",
		configFile,
	}
	var config = &serverConfig{}
	if _, err = Load(getConfigFile(), config); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// Read from deprecate file as well if necessary.
		if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
	for _, cfgFile := range configFiles {
		if _, err = Load(cfgFile, config); err != nil {
			if !os.IsNotExist(err) {
				return err
			}
			// If all else fails simply initialize the server config.
			return newSrvConfig(objAPI)
			continue
		}

		break
	}
	if os.IsNotExist(err) {
		// Initialize the server config, if no config exists.
		return newSrvConfig(objAPI)
	}
	return saveServerConfig(context.Background(), objAPI, config)
}
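The migration above now walks an ordered list of candidate config files (the live config.json, its .deprecated sibling, then the etcd-migrated path), loads the first one that exists, and initializes defaults only when every candidate is missing. A compact sketch of that first-match loading loop, with a hypothetical load function standing in for the quick-config Load used above:

package sketch

import (
	"errors"
	"os"
)

// loadFirst tries each candidate path in order and returns nil once one
// loads; os.IsNotExist misses fall through to the next candidate.
func loadFirst(paths []string, load func(string) error) error {
	var err error
	for _, p := range paths {
		if err = load(p); err != nil {
			if !os.IsNotExist(err) {
				return err // a real failure stops the search
			}
			continue // missing file: try the next candidate
		}
		break // loaded successfully
	}
	if err != nil && os.IsNotExist(err) {
		return errors.New("no existing config found; initialize defaults")
	}
	return nil
}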
@@ -2483,7 +2492,7 @@ func migrateMinioSysConfig(objAPI ObjectLayer) error {
	// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
	// and configFile, take a transaction lock to avoid data race between readConfig()
	// and saveConfig().
	objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigFile)
	if err := objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}

@@ -175,7 +175,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
	}
	defer os.RemoveAll(fsDir)

	configPath := rootPath + "/" + minioConfigFile
	configPath := rootPath + SlashSeparator + minioConfigFile

	// Create a corrupted config file
	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
@@ -238,7 +238,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
	defer os.RemoveAll(rootPath)

	globalConfigDir = &ConfigDir{path: rootPath}
	configPath := rootPath + "/" + minioConfigFile
	configPath := rootPath + SlashSeparator + minioConfigFile

	// Create a corrupted config file
	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
@@ -335,7 +335,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
	defer os.RemoveAll(rootPath)

	globalConfigDir = &ConfigDir{path: rootPath}
	configPath := rootPath + "/" + minioConfigFile
	configPath := rootPath + SlashSeparator + minioConfigFile

	for i := 3; i <= 17; i++ {
		// Create a corrupted config file

@@ -22,7 +22,7 @@ import (
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/iam/policy"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/iam/validator"
	"github.com/minio/minio/pkg/quick"
)
@@ -884,7 +884,7 @@ type serverConfigV32 struct {
	} `json:"policy"`
}

// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit with MQTT.
// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in all notification targets.
type serverConfigV33 struct {
	quick.Config `json:"-"` // ignore interfaces

@@ -23,6 +23,7 @@ import (
	"path"
	"runtime"
	"strings"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/quick"
@@ -49,10 +50,6 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
	}

	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	if globalEtcdClient != nil {
		return saveConfigEtcd(ctx, globalEtcdClient, configFile, data)
	}

	// Create a backup of the current config
	oldData, err := readConfig(ctx, objAPI, configFile)
	if err == nil {
@@ -71,15 +68,8 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
}

func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
	var configData []byte
	var err error

	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	if globalEtcdClient != nil {
		configData, err = readConfigEtcd(ctx, globalEtcdClient, configFile)
	} else {
		configData, err = readConfig(ctx, objAPI, configFile)
	}
	configData, err := readConfig(ctx, objAPI, configFile)
	if err != nil {
		return nil, err
	}
@@ -112,6 +102,25 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
	return sys.Init(objAPI)
}

// WatchConfigNASDisk - watches nas disk on periodic basis.
func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) {
	configInterval := globalRefreshIAMInterval
	watchDisk := func() {
		ticker := time.NewTicker(configInterval)
		defer ticker.Stop()
		for {
			select {
			case <-GlobalServiceDoneCh:
				return
			case <-ticker.C:
				loadConfig(objAPI)
			}
		}
	}
	// Refresh configSys in background for NAS gateway.
	go watchDisk()
}

// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
	if objAPI == nil {
@@ -152,45 +161,25 @@ func initConfig(objAPI ObjectLayer) error {
		return errServerNotInitialized
	}

	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	if globalEtcdClient != nil {
		if err := checkConfigEtcd(context.Background(), globalEtcdClient, getConfigFile()); err != nil {
			if err == errConfigNotFound {
				// Migrates all configs at old location.
				if err = migrateConfig(); err != nil {
					return err
				}
				// Migrates etcd ${HOME}/.minio/config.json to '/config/config.json'
				if err = migrateConfigToMinioSys(objAPI); err != nil {
					return err
				}
			} else {
				return err
			}
		}

		// Watch config for changes and reloads them.
		go watchConfigEtcd(objAPI, configFile, loadConfig)

	} else {
		if isFile(getConfigFile()) {
			if err := migrateConfig(); err != nil {
				return err
			}
		}
		// Migrates ${HOME}/.minio/config.json or config.json.deprecated
		// to '<export_path>/.minio.sys/config/config.json'
		// ignore if the file doesn't exist.
		if err := migrateConfigToMinioSys(objAPI); err != nil {
			return err
		}

		// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
		if err := migrateMinioSysConfig(objAPI); err != nil {
	if isFile(getConfigFile()) {
		if err := migrateConfig(); err != nil {
			return err
		}
	}

	// Migrates ${HOME}/.minio/config.json or config.json.deprecated
	// to '<export_path>/.minio.sys/config/config.json'
	// ignore if the file doesn't exist.
	// If etcd is set then migrates /config/config.json
	// to '<export_path>/.minio.sys/config/config.json'
	if err := migrateConfigToMinioSys(objAPI); err != nil {
		return err
	}

	// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
	if err := migrateMinioSysConfig(objAPI); err != nil {
		return err
	}

	return loadConfig(objAPI)
}

@@ -62,6 +62,8 @@ var (

	errInvalidInternalIV            = Error{"The internal encryption IV is malformed"}
	errInvalidInternalSealAlgorithm = Error{"The internal seal algorithm is invalid and not supported"}

	errMissingUpdatedKey = Error{"The key update returned no error but also no sealed key"}
)

var (

@@ -18,6 +18,7 @@ import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"net/http"
	"strings"
)
@@ -125,6 +126,25 @@ func (s3KMS) IsRequested(h http.Header) bool {
	return false
}

// ParseHTTP parses the SSE-KMS headers and returns the SSE-KMS key ID
// and context, if present, on success.
func (s3KMS) ParseHTTP(h http.Header) (string, interface{}, error) {
	algorithm := h.Get(SSEHeader)
	if algorithm != SSEAlgorithmKMS {
		return "", nil, ErrInvalidEncryptionMethod
	}

	contextStr, ok := h[SSEKmsContext]
	if ok {
		var context map[string]interface{}
		if err := json.Unmarshal([]byte(contextStr[0]), &context); err != nil {
			return "", nil, err
		}
		return h.Get(SSEKmsID), context, nil
	}
	return h.Get(SSEKmsID), nil, nil
}

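A quick standalone illustration of the same parsing logic, using the literal AWS SSE-KMS header names that appear in the tests below; the helper parseSSEKMS is hypothetical and not part of the package:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
)

// parseSSEKMS mirrors ParseHTTP above: validate the algorithm header,
// then decode the optional JSON encryption context.
func parseSSEKMS(h http.Header) (keyID string, ctx map[string]interface{}, err error) {
	if h.Get("X-Amz-Server-Side-Encryption") != "aws:kms" {
		return "", nil, errors.New("invalid encryption method")
	}
	keyID = h.Get("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id")
	if raw := h.Get("X-Amz-Server-Side-Encryption-Context"); raw != "" {
		if err = json.Unmarshal([]byte(raw), &ctx); err != nil {
			return "", nil, err
		}
	}
	return keyID, ctx, nil
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Server-Side-Encryption", "aws:kms")
	h.Set("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id", "my-key-id")
	h.Set("X-Amz-Server-Side-Encryption-Context", `{"bucket": "some-bucket"}`)
	fmt.Println(parseSSEKMS(h))
}
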
var (
	// SSEC represents AWS SSE-C. It provides functionality to handle
	// SSE-C requests.

@@ -54,6 +54,56 @@ func TestKMSIsRequested(t *testing.T) {
	}
}

var kmsParseHTTPTests = []struct {
	Header     http.Header
	ShouldFail bool
}{
	{Header: http.Header{}, ShouldFail: true},                                                     // 0
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"aws:kms"}}, ShouldFail: false}, // 1
	{Header: http.Header{
		"X-Amz-Server-Side-Encryption":                []string{"aws:kms"},
		"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
	}, ShouldFail: false}, // 2
	{Header: http.Header{
		"X-Amz-Server-Side-Encryption":                []string{"aws:kms"},
		"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
		"X-Amz-Server-Side-Encryption-Context":        []string{"{}"},
	}, ShouldFail: false}, // 3
	{Header: http.Header{
		"X-Amz-Server-Side-Encryption":                []string{"aws:kms"},
		"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
		"X-Amz-Server-Side-Encryption-Context":        []string{"{\"bucket\": \"some-bucket\"}"},
	}, ShouldFail: false}, // 4
	{Header: http.Header{
		"X-Amz-Server-Side-Encryption":                []string{"aws:kms"},
		"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
		"X-Amz-Server-Side-Encryption-Context":        []string{"{\"bucket\": \"some-bucket\"}"},
	}, ShouldFail: false}, // 5
	{Header: http.Header{
		"X-Amz-Server-Side-Encryption":                []string{"AES256"},
		"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
		"X-Amz-Server-Side-Encryption-Context":        []string{"{\"bucket\": \"some-bucket\"}"},
	}, ShouldFail: true}, // 6
	{Header: http.Header{
		"X-Amz-Server-Side-Encryption":                []string{"aws:kms"},
		"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
		"X-Amz-Server-Side-Encryption-Context":        []string{"{\"bucket\": \"some-bucket\""}, // invalid JSON
	}, ShouldFail: true}, // 7
}

func TestKMSParseHTTP(t *testing.T) {
	for i, test := range kmsParseHTTPTests {
		_, _, err := S3KMS.ParseHTTP(test.Header)
		if err == nil && test.ShouldFail {
			t.Errorf("Test %d: should fail but succeeded", i)
		}
		if err != nil && !test.ShouldFail {
			t.Errorf("Test %d: should pass but failed with: %v", i, err)
		}
	}
}

var s3IsRequestedTests = []struct {
	Header   http.Header
	Expected bool

@@ -86,6 +86,19 @@ type KMS interface {
	// referenced by the keyID. The provided context must
	// match the context used to generate the sealed key.
	UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error)

	// UpdateKey re-wraps the sealedKey if the master key, referenced by
	// `keyID`, has changed in the meantime. This usually happens when the
	// KMS operator performs a key-rotation operation on the master key.
	// UpdateKey fails if the provided sealedKey cannot be decrypted using
	// the master key referenced by keyID.
	//
	// UpdateKey makes no guarantees whatsoever about whether the returned
	// rotatedKey is actually different from the sealedKey. If nothing has
	// changed at the KMS, or if the KMS does not support updating generated
	// keys, this method may behave like a NOP and just return the sealedKey
	// itself.
	UpdateKey(keyID string, sealedKey []byte, context Context) (rotatedKey []byte, err error)
}

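Under this contract, a caller can run periodic rotation passes over its stored sealed keys and persist the result only when it actually changed. A toy illustration of that calling pattern — noopKMS and the loop below are illustrative, not MinIO code:

package main

import (
	"bytes"
	"fmt"
)

// noopKMS models a KMS that cannot re-wrap keys: UpdateKey behaves like a
// NOP and returns the sealed key unchanged, as the contract above permits.
type noopKMS struct{}

func (noopKMS) UpdateKey(keyID string, sealedKey []byte) ([]byte, error) {
	return sealedKey, nil
}

func main() {
	kms := noopKMS{}
	stored := []byte("sealed-object-key")

	rotated, err := kms.UpdateKey("my-master-key", stored)
	if err != nil {
		fmt.Println("re-wrap failed:", err)
		return
	}
	if !bytes.Equal(rotated, stored) {
		stored = rotated // persist only if the KMS actually re-wrapped it
	}
	fmt.Printf("sealed key after rotation pass: %s\n", stored)
}
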
type masterKeyKMS struct {
@@ -126,6 +139,13 @@ func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context)
	return key, nil
}

func (kms *masterKeyKMS) UpdateKey(keyID string, sealedKey []byte, ctx Context) ([]byte, error) {
	if _, err := kms.UnsealKey(keyID, sealedKey, ctx); err != nil {
		return nil, err
	}
	return sealedKey, nil // The master key cannot update data keys -> Do nothing.
}

func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
	if context == nil {
		context = Context{}
@@ -51,11 +51,20 @@ func TestMasterKeyKMS(t *testing.T) {
			t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err)
		}
		if err == nil && test.ShouldFail {
			t.Errorf("Test %d: KMS unsealed the generated key successfully but should have failed", i)
		}
		if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) {
			t.Errorf("Test %d: The generated and unsealed key differ", i)
		}

		rotatedKey, err := kms.UpdateKey(test.UnsealKeyID, sealedKey, test.UnsealContext)
		if err == nil && test.ShouldFail {
			t.Errorf("Test %d: KMS updated the generated key successfully but should have failed", i)
		}
		if !test.ShouldFail && !bytes.Equal(rotatedKey, sealedKey[:]) {
			t.Errorf("Test %d: The updated and sealed key differ", i)
		}
	}
}

@@ -250,3 +250,30 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
	copy(key[:], []byte(plainKey))
	return key, nil
}

// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
// has been changed by the KMS operator - i.e. the master key has been rotated.
// If the master key hasn't changed since the sealedKey was created / updated,
// it may return the same sealedKey as rotatedKey.
//
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
	var contextStream bytes.Buffer
	ctx.WriteTo(&contextStream)

	payload := map[string]interface{}{
		"ciphertext": string(sealedKey),
		"context":    base64.StdEncoding.EncodeToString(contextStream.Bytes()),
	}
	s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
	if err != nil {
		return nil, err
	}
	ciphertext, ok := s.Data["ciphertext"]
	if !ok {
		return nil, errMissingUpdatedKey
	}
	rotatedKey = []byte(ciphertext.(string))
	return rotatedKey, nil
}

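For reference, the same rewrap operation can be exercised directly with HashiCorp's Vault client. A standalone sketch under stated assumptions: the key name "my-key" and the ciphertext are placeholders, and a reachable Vault server with the transit backend enabled is assumed:

package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig/NewClient pick up VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	payload := map[string]interface{}{
		"ciphertext": "vault:v1:AY3I...", // sealed key from an earlier transit encrypt call
	}
	s, err := client.Logical().Write("transit/rewrap/my-key", payload)
	if err != nil {
		log.Fatal(err)
	}
	// The returned ciphertext is re-wrapped under the latest master key version.
	fmt.Println(s.Data["ciphertext"])
}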