Compare commits: RELEASE.20... → RELEASE.20...

263 commits (short SHAs, oldest listed first as shown):

90bfa6260a, 8b80eca184, fb1374f2f7, ff5bf51952, 61927d228c, 82b9f2c931, 6a19d7b25a, ac2e0596bd,
42c821e164, 20b907d8fb, f45977d371, 4ec9b349d0, 65ac7c5671, 5c2af3f792, 8771e83545, fa5a1cebd9,
4f981a0b42, 127641731a, c1a17c2561, 1c5b05c130, 4155f4e49b, f3022e891d, c28405a5c2, 2a2ff96ee1,
fd53057654, 3094615e38, ff726969aa, be313f1758, 704be85987, c8da04ba5b, 9ed423b13f, c8fbc94329,
be70ef59e7, a790877c01, 77dc2031a2, 975134e42b, cb7d23cb17, 2e02e1889b, 3b05e175d7, b512241300,
dbf7b1e573, 26985ac632, ad75683bde, 4925bc3e80, ffded5a930, 1127293863, 2b51fe9f26, 4780fa5a58,
520552ffa9, b823d6d7bd, a9d724120f, dc0dce9beb, 0e1408844b, e34369c860, 8dc897b5f5, 456ce4cc92,
696f4ceee2, dac1cf5a9a, cb01516a26, dfa1b417a8, 31bee6b6ed, 04b92124c5, 5392eee250, fa32c71a56,
c9b8bd8de2, 017456df63, 14b137aa66, 3064da7b08, 76df027264, 208efb843b, 9ac12cf898, ddea0bdf11,
e7f491a14b, 8700945cdf, ff6aabd9c0, 6ba323b009, e7b3f39064, 9fa727d154, 73e4e99942, a87fc7d09b,
475df52a19, 5512baab21, 77963078a2, 3f258062d8, 3d65dc8d94, 53e4887e02, a7be313230, e12f52e2c6,
432cb38dbd, 18fedc67d5, a0456ce940, 94e5cb7576, 33aec08e8c, 5bde31d021, b52a3e523c, 428836d4e1,
259a5d825b, 29f64355ce, 496fba3e9a, 810a44e951, 341d61e3d8, 8a71b0ec5a, 0772438125, b3c19e2d4b,
ec9bfd3aef, fa3546bb03, 42e716a094, 6b2ed0fc47, 0cd0f6c255, 4553db3872, 847a3ea0a2, 1f3d270de8,
daa57e8e5a, eb18c82976, 2ab0681c0c, 83d4c5763c, c6f86d35d3, d65a2c6725, 70136fb55b, d6dd98e597,
c38ada1a26, 48bc3f1d53, e211f6f52e, f13f421e84, cd03bfb3cf, add1b6cb6b, 4f3c9d7677, 2e19619e79,
de736f2864, 2fa98b1d6a, 99a4298938, 069badc7e9, c601cb2f1e, 9ca7470ccc, b3ca304c01, 60f52f461f,
e5fb6294a7, a15bb19d37, 7bf1caa0fe, 6e7962bf35, 825e29f301, cebeca3075, 26640033b0, bbb56739bd,
1cd801b2e9, bf9b619d86, bc79b435a2, 0ed6daab59, e96f19c867, aaef18b1a3, a48a034e5a, bf8ec8ad73,
8ce424bacd, cea3e3f7a6, af36c92cab, 1b258da108, 35427a017d, a0715f3b59, 5a28ef0d47, 3385bf3da8,
1ce8d2c476, a8296445ad, 43c72374d4, d759a7ce99, 5d2b5ee6a9, 9ee619ea14, 55b385beee, 930943f058,
e6d8e272ce, b52b90412b, 843f481eb3, f6d0645a3c, 414a7eca83, 5cd9f10a02, b976521c83, 2c3b1f01d9,
a6f4cf61f2, dfa8835720, c5ac901e8d, 4101d4917c, d966d29fed, c301f5882d, 123cccaed1, cbd02c58be,
c71895f225, b83413b167, 63e0a81760, 8d47ef503c, 54eded2e6f, 94c88890b8, e871e27562, 007a52b546,
efb8b00db0, d744865dc6, e40c29e834, b0cea1c0f3, 6f2b4675fa, a4ce1daf99, 7bdaf9bc50, 55d4eee6f1,
ac82798d0a, 5b71c21330, 3e3fbdf8e6, c9349747ca, 2b9b907f9c, 9389a55e5d, 87e6533cf3, 38bc3a45db,
c5faba55c1, 8b5e6e338c, 0373a1699b, 559a59220e, 59e1763816, 041a812ba0, fbfc9a61ec, be9baa1464,
b058e32348, ea66a52ed1, 55dd017e62, 12353caf35, a57c747667, 28661c0413, 04a152be12, bce3f8237d,
16a45e5aff, 000a60f238, bf278ca36f, 97f2bc26b9, bba562235b, 2337e5f803, ffd7b7059c, 5c0acbc6fc,
5a52bc7ff6, 045e1fed2b, a861d38532, 20a15567b8, 94f67ad224, 36ee110563, 1dc25bcf5f, 2d96745156,
9d49688c87, 8e09374cb8, 58d90ed73c, e857b6741d, 0505ef83b5, 22bc15d89b, a2e904b966, cc7dc61eb4,
c4f480a839, 60831e3299, 037319066f, bb871a7c31, 0ebbd3caef, bd56f80250, a39e810965, 09103991ea,
c43f745449, 9610a74c19, 0bcd8abc5c, 70b350c383, 338e9a9be9, edbd8709ec, 5db60a6c59
.dockerignore (new file, 2 changes)
@@ -0,0 +1,2 @@
.git
.github
.github/PULL_REQUEST_TEMPLATE.md (28 changes)
@@ -1,33 +1,19 @@
<!--- Provide a general summary of your changes in the Title above -->

## Description
<!--- Describe your changes in detail -->

## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

## Regression
<!-- Is this PR fixing a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->
<!--- see how your change affects other areas of the code, etc. -->
## How to test this PR?

## Types of changes
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)

## Checklist:
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [ ] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
- [ ] I have added unit tests to cover my changes.
- [ ] I have added/updated functional tests in [mint](https://github.com/minio/mint). (If yes, add `mint` PR # here: )
- [ ] All new and existing tests passed.
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Functional tests needed (If yes, add [mint](https://github.com/minio/mint) PR # here: )
.gitignore (3 changes)
@@ -9,7 +9,6 @@ site/
/.idea/
/Minio.iml
**/access.log
build/
vendor/**/*.js
vendor/**/*.json
release
@@ -23,5 +22,3 @@ prime/
stage/
.sia_temp/
config.json
healthcheck
check-user
.travis.yml (25 changes)
@@ -2,6 +2,14 @@ go_import_path: github.com/minio/minio

language: go

addons:
  apt:
    packages:
    - shellcheck

services:
  - docker

# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
@@ -18,15 +26,7 @@ matrix:
      - ARCH=x86_64
      - CGO_ENABLED=0
      - GO111MODULE=on
    # Enable build cache
    # https://restic.net/blog/2018-09-02/travis-build-cache
    cache:
      directories:
        - $HOME/.cache/go-build
        - $HOME/gopath/pkg/mod
        - $HOME/go/pkg/mod
    go: 1.12.1
    go: 1.13.x
    script:
      - make
      - diff -au <(gofmt -s -d cmd) <(printf "")
@@ -37,12 +37,14 @@ matrix:
      - make verify
      - make coverage
      - cd browser && yarn && yarn test && cd ..
      - bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

  - os: windows
    env:
      - ARCH=x86_64
      - CGO_ENABLED=0
      - GO111MODULE=on
    go: 1.12.1
    go: 1.13.x
    script:
      - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
      - bash buildscripts/go-coverage.sh
@@ -54,6 +56,3 @@ before_script:

before_install:
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install 11.10.1 ; fi

after_success:
  - bash <(curl -s https://codecov.io/bash)
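The shell-lint line added to the CI matrix can be reproduced locally; this is the same command the `.travis.yml` script runs, assuming a checkout that contains the `mint/` tree:

```sh
bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
```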
@@ -37,7 +37,7 @@ After your code changes, make sure

- To add test cases for the new code. If you have questions about how to do it, please ask on our [Slack](https://slack.min.io) channel.
- To run `make verifiers`
- To squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- To run `go test -race ./...` and `go build` completes.
- To run `make test` and `make build` completes.

### Commit changes

After verification, commit your changes. This is a [great post](https://chris.beams.io/posts/git-commit/) on how to write useful commit messages
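Taken together, the updated contributor checklist amounts to roughly this local sequence (a sketch; the rebase depth is a placeholder for however many commits your branch carries):

```sh
make verifiers            # vet, fmt, lint, staticcheck, spelling
make test && make build   # replaces the old `go test -race ./...` / `go build` advice
git rebase -i HEAD~3      # squash into a single commit; force-pushing the PR branch is okay
```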
Dockerfile (14 changes)
@@ -1,29 +1,27 @@
FROM golang:1.12-alpine
FROM golang:1.13-alpine

LABEL maintainer="MinIO Inc <dev@min.io>"

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
ENV GOPROXY https://proxy.golang.org

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    cd dockerscripts; go build -tags kqueue -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go && \
    go build -tags kqueue -ldflags "-s -w" -o /usr/bin/check-user check-user.go
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.9

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

EXPOSE 9000

COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY --from=0 /usr/bin/check-user /usr/bin/check-user
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
@@ -34,6 +32,4 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]
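A minimal sketch of building and running the image from this Dockerfile; the tag and host paths are illustrative, not part of the diff:

```sh
docker build -t minio/minio:dev .
docker run -p 9000:9000 -v /mnt/data:/data minio/minio:dev server /data
```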
@@ -2,20 +2,19 @@ FROM alpine:3.9

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck dockerscripts/check-user /usr/bin/
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/healthcheck && \
    chmod +x /usr/bin/check-user
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000

@@ -23,6 +22,4 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]
Dockerfile.mint (new file, 23 changes)
@@ -0,0 +1,23 @@
FROM ubuntu:16.04

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV MINT_ROOT_DIR /mint

COPY mint /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
    apt-get --yes --quiet install wget jq curl git dnsmasq && \
    cd /mint && /mint/release.sh

WORKDIR /mint

ENTRYPOINT ["/mint/entrypoint.sh"]
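An illustrative build of the new mint test image (the tag name is a placeholder):

```sh
docker build -t minio/mint:local -f Dockerfile.mint .
```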
@@ -1,4 +1,4 @@
FROM golang:1.12-alpine
FROM golang:1.13-alpine

ENV GOPATH /go
ENV CGO_ENABLED 0
@@ -6,30 +6,25 @@ ENV GO111MODULE on

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio/dockerscripts && \
    go build -tags kqueue -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go && \
    go build -tags kqueue -ldflags "-s -w" -o /usr/bin/check-user check-user.go
    git clone https://github.com/minio/minio

FROM alpine:3.9

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY --from=0 /usr/bin/check-user /usr/bin/check-user
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/healthcheck && \
    chmod +x /usr/bin/check-user
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000

@@ -37,6 +32,4 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]
@@ -1,7 +1,7 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.12
FROM golang:1.13

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio
@@ -33,7 +33,7 @@ USER ci
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 20m "$d"; done
RUN make verifiers
RUN make crosscompile
RUN make verify
@@ -56,15 +56,17 @@ FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
COPY mint /mint

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV MINT_ROOT_DIR /mint

RUN apt-get --yes update && apt-get --yes upgrade && apt-get --yes --quiet install wget jq curl git dnsmasq && \
    git clone https://github.com/minio/mint.git /mint && \
RUN apt-get --yes update && apt-get --yes upgrade && \
    apt-get --yes --quiet install wget jq curl git dnsmasq && \
    cd /mint && /mint/release.sh

WORKDIR /mint
@@ -1,37 +0,0 @@
# For maintainers only

### Setup your minio GitHub Repository

Fork [minio upstream](https://github.com/minio/minio/fork) source repository to your own personal repository.

```bash
$ mkdir -p $GOPATH/src/github.com/minio
$ cd $GOPATH/src/github.com/minio
$ git clone https://github.com/$USER_ID/minio
$
```

``minio`` uses [govendor](https://github.com/kardianos/govendor) for its dependency management.

### To manage dependencies

#### Add new dependencies

- Run `go get foo/bar`
- Edit your code to import foo/bar
- Run `govendor add foo/bar` from top-level directory

#### Remove dependencies

- Run `govendor remove foo/bar`

#### Update dependencies

- Run `govendor remove +vendor`
- Run to update the dependent package `go get -u foo/bar`
- Run `govendor add +external`

### Making new releases

`minio` doesn't follow semantic versioning style, `minio` instead uses the release date and time as the release versions.

`make release` will generate new binary into `release` directory.
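For reference, the add-a-dependency workflow that this now-removed guide described, as one sequence (`foo/bar` is the guide's own placeholder import path):

```sh
go get foo/bar          # fetch the package
# edit your code to import foo/bar, then vendor it from the top-level directory:
govendor add foo/bar
```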
Makefile (32 changes)
@@ -2,13 +2,12 @@ PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)

GOARCH := $(shell go env GOARCH)
GOOS := $(shell go env GOOS)
GOOSALT ?= 'linux'
ifeq ($(GOOS),'darwin')
GOOSALT = 'mac'
endif

TAG ?= $(USER)
VERSION ?= $(shell git describe --tags)
TAG ?= "minio/minio:$(VERSION)"

BUILD_LDFLAGS := '$(LDFLAGS)'

all: build
@@ -19,9 +18,13 @@ checks:

getdeps:
	@mkdir -p ${GOPATH}/bin
	@which golint 1>/dev/null || (echo "Installing golint" && go get -u golang.org/x/lint/golint)
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet -O ${GOPATH}/bin/staticcheck https://github.com/dominikh/go-tools/releases/download/2019.1/staticcheck_${GOOS}_amd64 && chmod +x ${GOPATH}/bin/staticcheck)
	@which misspell 1>/dev/null || (echo "Installing misspell" && wget --quiet https://github.com/client9/misspell/releases/download/v0.3.4/misspell_0.3.4_${GOOSALT}_64bit.tar.gz && tar xf misspell_0.3.4_${GOOSALT}_64bit.tar.gz && mv misspell ${GOPATH}/bin/misspell && chmod +x ${GOPATH}/bin/misspell && rm -f misspell_0.3.4_${GOOSALT}_64bit.tar.gz)
	@which golint 1>/dev/null || (echo "Installing golint" && GO111MODULE=off go get -u golang.org/x/lint/golint)
ifeq ($(GOARCH),s390x)
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
else
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
endif
	@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -29,25 +32,26 @@ crosscompile:
verifiers: getdeps vet fmt lint staticcheck spelling

vet:
	@echo "Running $@"
	@echo "Running $@ check"
	@GO111MODULE=on go vet github.com/minio/minio/...

fmt:
	@echo "Running $@"
	@echo "Running $@ check"
	@GO111MODULE=on gofmt -d cmd/
	@GO111MODULE=on gofmt -d pkg/

lint:
	@echo "Running $@"
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...

staticcheck:
	@echo "Running $@"
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

spelling:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
@@ -71,9 +75,7 @@ coverage: build
# Builds minio locally.
build: checks
	@echo "Building minio binary to './minio'"
	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null
	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/check-user $(PWD)/dockerscripts/check-user.go 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null

docker: build
	@docker build -t $(TAG) . -f Dockerfile.dev
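A sketch of how the reworked targets fit together for local development, assuming Go and Docker are installed:

```sh
make getdeps     # installs golint, staticcheck and misspell into $GOPATH/bin
make verifiers   # vet, fmt, lint, staticcheck, spelling
make build       # builds ./minio only; the healthcheck/check-user binaries are gone
make docker      # builds the image tagged minio/minio:$(git describe --tags) via Dockerfile.dev
```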
README.md (23 changes)
@@ -1,10 +1,15 @@
# MinIO Quickstart Guide
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)

MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.
MinIO is an object storage server released under Apache License v2.0. It is compatible[1] with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.

MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL.

[1] MinIO in its default mode is faster and does not calculate MD5Sum unless passed by client. This may lead to incompatibility with some S3 clients like s3ql that heavily depend on MD5Sum. For such applications start MinIO with `--compat` option.
```sh
minio --compat server /data
```

## Docker Container
### Stable
```
@@ -157,6 +162,22 @@ When deployed on a single drive, MinIO server lets clients access any pre-existi

The above statement is also valid for all gateway backends.

## Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster and restart them, as shown in the following command from the MinIO client (mc):

```
mc admin update <minio alias, e.g., myminio>
```

**Important things to remember during upgrades**:

- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located, for example if the current binary is at `/usr/local/bin/minio`, you would need write access to `/usr/local/bin`.
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` until all clusters have been updated.
- If you are updating the server it is always recommended (unless explicitly mentioned in MinIO server release notes), to update `mc` once all the servers have been upgraded using `mc update`.
- `mc admin update` is disabled in docker/container environments, container environments provide their own mechanisms for updating running containers.
- If you are using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If you are using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md

## Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
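A hedged example of the documented upgrade flow; `myminio` is an illustrative alias assumed to be already configured in `mc`:

```sh
mc admin update myminio   # updates the binary on every node and restarts them
mc admin info myminio     # afterwards, verify the running version across nodes
```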
@@ -75,7 +75,7 @@ service minio start

## Install from source

Installing from source is intended for developers and advanced users only. If you do not yet have a working Golang environment, please refer to [How to install Golang](https://docs.min.io/docs/how-to-install-golang).
Installing from source is intended for developers and advanced users only. If you do not yet have a working Golang environment, please refer to [How to install Golang](https://golang.org/doc/install).

```sh
go get -u github.com/minio/minio
```
@@ -13,7 +13,7 @@ nvm install stable

### Install `go-bindata` and `go-bindata-assetfs`

If you do not have a working Golang environment, please follow [Install Golang](https://docs.min.io/docs/how-to-install-golang)
If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)

```sh
go get github.com/go-bindata/go-bindata/go-bindata
```
@@ -23,7 +23,7 @@
<!--[if lt IE 11]>
<div class="ie-warning">
  <div class="iw-inner">
    <i class="iwi-icon fa fa-warning"></i>
    <i class="iwi-icon fas fa-exclamation-triangle"></i>

    You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser
@@ -16,7 +16,7 @@

import "babel-polyfill"
import "./less/main.less"
import "font-awesome/css/font-awesome.css"
import "@fortawesome/fontawesome-free/css/all.css"
import "material-design-iconic-font/dist/css/material-design-iconic-font.min.css"

import React from "react"
@@ -84,32 +84,32 @@ export class BrowserDropdown extends React.Component {
<li>
  <Dropdown pullRight id="top-right-menu">
    <Dropdown.Toggle noCaret>
      <i className="fa fa-reorder" />
      <i className="fas fa-bars" />
    </Dropdown.Toggle>
    <Dropdown.Menu className="dropdown-menu-right">
      <li>
        <a target="_blank" href="https://github.com/minio/minio">
          GitHub <i className="fa fa-github" />
          GitHub <i className="fab fa-github" />
        </a>
      </li>
      <li>
        <a href="" onClick={this.fullScreen}>
          Fullscreen <i className="fa fa-expand" />
          Fullscreen <i className="fas fa-expand" />
        </a>
      </li>
      <li>
        <a target="_blank" href="https://docs.min.io/">
          Documentation <i className="fa fa-book" />
          Documentation <i className="fas fa-book" />
        </a>
      </li>
      <li>
        <a target="_blank" href="https://slack.min.io">
          Ask for help <i className="fa fa-question-circle" />
          Ask for help <i className="fas fa-question-circle" />
        </a>
      </li>
      <li>
        <a href="" id="show-about" onClick={this.showAbout.bind(this)}>
          About <i className="fa fa-info-circle" />
          About <i className="fas fa-info-circle" />
        </a>
        {this.state.showAboutModal && (
          <AboutModal
@@ -120,7 +120,7 @@ export class BrowserDropdown extends React.Component {
      </li>
      <li>
        <a href="" onClick={this.showChangePassword.bind(this)}>
          Change Password <i className="fa fa-cog" />
          Change Password <i className="fas fa-cog" />
        </a>
        {this.state.showChangePasswordModal && (
          <ChangePasswordModal
@@ -131,7 +131,7 @@ export class BrowserDropdown extends React.Component {
      </li>
      <li>
        <a href="" id="logout" onClick={this.logout}>
          Sign Out <i className="fa fa-sign-out" />
          Sign Out <i className="fas fa-sign-out-alt" />
        </a>
      </li>
    </Dropdown.Menu>

@@ -165,7 +165,7 @@ export class ChangePasswordModal extends React.Component {
    })
  }}
  className={
    "toggle-password fa fa-eye " +
    "toggle-password fas fa-eye " +
    (this.state.currentSecretKeyVisible ? "toggled" : "")
  }
/>
@@ -211,7 +211,7 @@ export class ChangePasswordModal extends React.Component {
    })
  }}
  className={
    "toggle-password fa fa-eye " +
    "toggle-password fas fa-eye " +
    (this.state.newSecretKeyVisible ? "toggled" : "")
  }
/>
@@ -18,7 +18,7 @@ import React from "react"

export const Host = () => (
  <div className="fes-host">
    <i className="fa fa-globe" />
    <i className="fas fa-globe-americas" />
    <a href="/">{window.location.host}</a>
  </div>
)
@@ -125,7 +125,7 @@ export class Login extends React.Component {
    autoComplete="new-password"
  />
  <button className="lw-btn" type="submit">
    <i className="fa fa-sign-in" />
    <i className="fas fa-sign-in-alt" />
  </button>
</form>
</div>
@@ -48,7 +48,7 @@ export const MainActions = ({
<Dropdown dropup className="feb-actions" id="fe-action-toggle">
  <Dropdown.Toggle noCaret className="feba-toggle">
    <span>
      <i className="fa fa-plus" />
      <i className="fas fa-plus" />
    </span>
  </Dropdown.Toggle>
  <Dropdown.Menu>
@@ -63,7 +63,7 @@ export const MainActions = ({
  />
  <label htmlFor="file-input">
    {" "}
    <i className="fa fa-cloud-upload" />{" "}
    <i className="fas fa-cloud-upload-alt" />{" "}
  </label>
</a>
</OverlayTrigger>
@@ -78,7 +78,7 @@ export const MainActions = ({
    showMakeBucketModal()
  }}
>
  <i className="fa fa-hdd-o" />
  <i className="far fa-hdd" />
</a>
</OverlayTrigger>
)}
@@ -20,7 +20,6 @@ import ClickOutHandler from "react-onclickout"
import { connect } from "react-redux"

import logo from "../../img/logo.svg"
import Dropdown from "react-bootstrap/lib/Dropdown"
import BucketSearch from "../buckets/BucketSearch"
import BucketList from "../buckets/BucketList"
import Host from "./Host"
@@ -28,8 +27,14 @@ import * as actionsCommon from "./actions"
import web from "../web"

export const SideBar = ({ sidebarOpen, clickOutside }) => {
  const onClickOut = e => {
    if (e.target.classList.contains("feh-trigger")) {
      return
    }
    clickOutside()
  }
  return (
    <ClickOutHandler onClickOut={clickOutside}>
    <ClickOutHandler onClickOut={onClickOut}>
      <div
        className={classNames({
          "fe-sidebar": true,
@@ -62,4 +67,7 @@ const mapDispatchToProps = dispatch => {
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(SideBar)
export default connect(
  mapStateToProps,
  mapDispatchToProps
)(SideBar)
@@ -35,7 +35,20 @@ describe("SideBar", () => {
it("should call clickOutside when the user clicks outside the sidebar", () => {
  const clickOutside = jest.fn()
  const wrapper = shallow(<SideBar clickOutside={clickOutside} />)
  wrapper.simulate("clickOut", { preventDefault: jest.fn() })
  wrapper.simulate("clickOut", {
    preventDefault: jest.fn(),
    target: { classList: { contains: jest.fn(() => false) } }
  })
  expect(clickOutside).toHaveBeenCalled()
})

it("should not call clickOutside when user clicks on sidebar toggle", () => {
  const clickOutside = jest.fn()
  const wrapper = shallow(<SideBar clickOutside={clickOutside} />)
  wrapper.simulate("clickOut", {
    preventDefault: jest.fn(),
    target: { classList: { contains: jest.fn(() => true) } }
  })
  expect(clickOutside).not.toHaveBeenCalled()
})
})
@@ -17,14 +17,29 @@
import React from "react"
import { connect } from "react-redux"
import { Scrollbars } from "react-custom-scrollbars"
import InfiniteScroll from "react-infinite-scroller"
import * as actionsBuckets from "./actions"
import { getVisibleBuckets } from "./selectors"
import { getFilteredBuckets } from "./selectors"
import BucketContainer from "./BucketContainer"
import web from "../web"
import history from "../history"
import { pathSlice } from "../utils"

export class BucketList extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      page: 1
    }
    this.loadNextPage = this.loadNextPage.bind(this)
  }
  componentDidUpdate(prevProps) {
    if (this.props.filter !== prevProps.filter) {
      this.setState({
        page: 1
      })
    }
  }
  componentWillMount() {
    const { fetchBuckets, setBucketList, selectBucket } = this.props
    if (web.LoggedIn()) {
@@ -39,18 +54,33 @@ export class BucketList extends React.Component {
      }
    }
  }
  loadNextPage() {
    this.setState({
      page: this.state.page + 1
    })
  }
  render() {
    const { visibleBuckets } = this.props
    const { filteredBuckets } = this.props
    const visibleBuckets = filteredBuckets.slice(0, this.state.page * 100)
    return (
      <div className="fesl-inner">
        <Scrollbars
          renderTrackVertical={props => <div className="scrollbar-vertical" />}
        >
          <ul>
            {visibleBuckets.map(bucket => (
              <BucketContainer key={bucket} bucket={bucket} />
            ))}
          </ul>
          <InfiniteScroll
            pageStart={0}
            loadMore={this.loadNextPage}
            hasMore={filteredBuckets.length > visibleBuckets.length}
            useWindow={false}
            element="div"
            initialLoad={false}
          >
            <ul>
              {visibleBuckets.map(bucket => (
                <BucketContainer key={bucket} bucket={bucket} />
              ))}
            </ul>
          </InfiniteScroll>
        </Scrollbars>
      </div>
    )
@@ -59,7 +89,8 @@ export class BucketList extends React.Component {

const mapStateToProps = state => {
  return {
    visibleBuckets: getVisibleBuckets(state)
    filteredBuckets: getFilteredBuckets(state),
    filter: state.buckets.filter
  }
}
@@ -29,13 +29,13 @@ jest.mock("../../web", () => ({
describe("BucketList", () => {
  it("should render without crashing", () => {
    const fetchBuckets = jest.fn()
    shallow(<BucketList visibleBuckets={[]} fetchBuckets={fetchBuckets} />)
    shallow(<BucketList filteredBuckets={[]} fetchBuckets={fetchBuckets} />)
  })

  it("should call fetchBuckets before component is mounted", () => {
    const fetchBuckets = jest.fn()
    const wrapper = shallow(
      <BucketList visibleBuckets={[]} fetchBuckets={fetchBuckets} />
      <BucketList filteredBuckets={[]} fetchBuckets={fetchBuckets} />
    )
    expect(fetchBuckets).toHaveBeenCalled()
  })
@@ -46,7 +46,7 @@ describe("BucketList", () => {
    history.push("/bk1/pre1")
    const wrapper = shallow(
      <BucketList
        visibleBuckets={[]}
        filteredBuckets={[]}
        setBucketList={setBucketList}
        selectBucket={selectBucket}
      />
@@ -14,9 +14,9 @@
 * limitations under the License.
 */

import { getVisibleBuckets, getCurrentBucket } from "../selectors"
import { getFilteredBuckets, getCurrentBucket } from "../selectors"

describe("getVisibleBuckets", () => {
describe("getFilteredBuckets", () => {
  let state
  beforeEach(() => {
    state = {
@@ -28,11 +28,11 @@ describe("getVisibleBuckets", () => {

  it("should return all buckets if no filter specified", () => {
    state.buckets.filter = ""
    expect(getVisibleBuckets(state)).toEqual(["test1", "test11", "test2"])
    expect(getFilteredBuckets(state)).toEqual(["test1", "test11", "test2"])
  })

  it("should return all matching buckets if filter is specified", () => {
    state.buckets.filter = "test1"
    expect(getVisibleBuckets(state)).toEqual(["test1", "test11"])
    expect(getFilteredBuckets(state)).toEqual(["test1", "test11"])
  })
})
@@ -19,7 +19,7 @@ import { createSelector } from "reselect"
const bucketsSelector = state => state.buckets.list
const bucketsFilterSelector = state => state.buckets.filter

export const getVisibleBuckets = createSelector(
export const getFilteredBuckets = createSelector(
  bucketsSelector,
  bucketsFilterSelector,
  (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
@@ -29,7 +29,7 @@ let BrowserUpdate = ({latestUiVersion}) => {
<a href="">
  <OverlayTrigger placement="left" overlay={ <Tooltip id="tt-version-update">
    New update available. Click to refresh.
  </Tooltip> }> <i className="fa fa-refresh"></i> </OverlayTrigger>
  </Tooltip> }> <i className="fas fa-sync"></i> </OverlayTrigger>
</a>
</li>
)
@@ -23,7 +23,7 @@ export const DeleteObjectConfirmModal = ({
}) => (
  <ConfirmModal
    show={true}
    icon="fa fa-exclamation-triangle mci-red"
    icon="fas fa-exclamation-triangle mci-red"
    text="Are you sure you want to delete?"
    sub="This cannot be undone!"
    okText="Delete"
@@ -67,14 +67,14 @@ export class ObjectActions extends React.Component {
  className="fiad-action"
  onClick={this.shareObject.bind(this)}
>
  <i className="fa fa-share-alt" />
  <i className="fas fa-share-alt" />
</a>
<a
  href=""
  className="fiad-action"
  onClick={this.showDeleteConfirmModal.bind(this)}
>
  <i className="fa fa-trash" />
  <i className="fas fa-trash-alt" />
</a>
</Dropdown.Menu>
{(showShareObjectModal && shareObjectName === object.name) &&
@@ -51,7 +51,7 @@ export const ObjectItem = ({
</div>
<div className="fesl-item fesl-item-name">
  <a
    href="#"
    href={getDataType(name, contentType) === "folder" ? name : "#"}
    onClick={e => {
      e.preventDefault()
      if (onClick) {
@@ -59,7 +59,7 @@ export class ObjectsBulkActions extends React.Component {
  }
>
  <span className="la-label">
    <i className="fa fa-check-circle" /> {checkedObjects.length}
    <i className="fas fa-check-circle" /> {checkedObjects.length}
    {checkedObjects.length === 1 ? " Object " : " Objects "}
    selected
  </span>
@@ -81,7 +81,7 @@ export class ObjectsBulkActions extends React.Component {
  </button>
</span>
<i
  className="la-close fa fa-times"
  className="la-close fas fa-times"
  id="close-bulk-actions"
  onClick={clearChecked}
/>
@@ -47,9 +47,9 @@ export const ObjectsHeader = ({
className={classNames({
  "fesli-sort": true,
  "fesli-sort--active": sortedByName,
  fa: true,
  "fa-sort-alpha-desc": sortedByName && sortOrder === SORT_ORDER_DESC,
  "fa-sort-alpha-asc": sortedByName && sortOrder === SORT_ORDER_ASC
  fas: true,
  "fa-sort-alpha-down-alt": sortedByName && sortOrder === SORT_ORDER_DESC,
  "fa-sort-alpha-down": sortedByName && sortOrder === SORT_ORDER_ASC
})}
/>
</div>
@@ -64,10 +64,10 @@ export const ObjectsHeader = ({
className={classNames({
  "fesli-sort": true,
  "fesli-sort--active": sortedBySize,
  fa: true,
  "fa-sort-amount-desc":
  fas: true,
  "fa-sort-amount-down":
    sortedBySize && sortOrder === SORT_ORDER_DESC,
  "fa-sort-amount-asc": sortedBySize && sortOrder === SORT_ORDER_ASC
  "fa-sort-amount-down-alt": sortedBySize && sortOrder === SORT_ORDER_ASC
})}
/>
</div>
@@ -82,10 +82,10 @@ export const ObjectsHeader = ({
className={classNames({
  "fesli-sort": true,
  "fesli-sort--active": sortedByLastModified,
  fa: true,
  "fa-sort-numeric-desc":
  fas: true,
  "fa-sort-numeric-down-alt":
    sortedByLastModified && sortOrder === SORT_ORDER_DESC,
  "fa-sort-numeric-asc":
  "fa-sort-numeric-down":
    sortedByLastModified && sortOrder === SORT_ORDER_ASC
})}
/>
@@ -1,5 +1,5 @@
/*
 * MinIO Cloud Storage (C) 2016 MinIO, Inc.
 * MinIO Cloud Storage (C) 2016, 2018, 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,46 +16,146 @@

import React from "react"
import { connect } from "react-redux"
import ClickOutHandler from "react-onclickout"
import { OverlayTrigger, Tooltip } from "react-bootstrap"
import { getCurrentBucket } from "../buckets/selectors"
import * as actionsObjects from "./actions"
import * as actionsBuckets from "../buckets/actions"

export const Path = ({ currentBucket, currentPrefix, selectPrefix }) => {
  const onPrefixClick = (e, prefix) => {
    e.preventDefault()
    selectPrefix(prefix)
export class Path extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      isEditing: false,
      path: ""
    }
  }
  let dirPath = []
  let path = ""
  if (currentPrefix) {
    path = currentPrefix.split("/").map((dir, i) => {
      if (dir) {
        dirPath.push(dir)
        let dirPath_ = dirPath.join("/") + "/"
        return (
          <span key={i}>
            <a href="" onClick={e => onPrefixClick(e, dirPath_)}>
              {dir}
            </a>
          </span>
        )
      }
  stopEditing() {
    this.setState({
      isEditing: false
    })
  }

  return (
    <h2>
      <span className="main">
        <a onClick={e => onPrefixClick(e, "")} href="">
          {currentBucket}
        </a>
      </span>
      {path}
    </h2>
  )
  onPrefixClick(e, prefix) {
    e.preventDefault()
    const { selectPrefix } = this.props
    selectPrefix(prefix)
  }
  onEditClick(e) {
    e.preventDefault()
    const { currentBucket, currentPrefix } = this.props
    this.setState(
      {
        isEditing: true,
        path: `${currentBucket}/${currentPrefix}`
      },
      () => {
        // focus on input and move cursor to the end
        this.pathInput.focus()
        this.pathInput.setSelectionRange(
          this.state.path.length,
          this.state.path.length
        )
      }
    )
  }
  onKeyDown(e) {
    // When Esc key is pressed
    if (e.keyCode === 27) {
      this.stopEditing()
    }
  }
  onInputClickOut() {
    this.stopEditing()
  }
  bucketExists(bucketName) {
    const { buckets } = this.props
    return buckets.includes(bucketName)
  }
  async onSubmit(e) {
    e.preventDefault()
    const { makeBucket, selectBucket } = this.props
    // all paths need to end in slash to display contents properly
    let path = this.state.path
    if (!path.endsWith("/")) {
      path += "/"
    }
    const splittedPath = path.split("/")
    if (splittedPath.length > 0) {
      // prevent bucket name from being empty
      if (splittedPath[0]) {
        const bucketName = splittedPath[0]
        const prefix = splittedPath.slice(1).join("/")
        if (!this.bucketExists(bucketName)) {
          await makeBucket(bucketName)
        }
        // check updated buckets and don't proceed on invalid inputs
        if (this.bucketExists(bucketName)) {
          // then select bucket with prefix
          selectBucket(bucketName, prefix)
        }
        this.stopEditing()
      }
    }
  }
  render() {
    const pathTooltip = <Tooltip id="tt-path">Choose or create new path</Tooltip>
    const { currentBucket, currentPrefix } = this.props
    let dirPath = []
    let path = ""
    if (currentPrefix) {
      path = currentPrefix.split("/").map((dir, i) => {
        if (dir) {
          dirPath.push(dir)
          let dirPath_ = dirPath.join("/") + "/"
          return (
            <span key={i}>
              <a href="" onClick={e => this.onPrefixClick(e, dirPath_)}>
                {dir}
              </a>
            </span>
          )
        }
      })
    }
    return (
      <h2>
        {this.state.isEditing ? (
          <ClickOutHandler onClickOut={() => this.onInputClickOut()}>
            <form onSubmit={e => this.onSubmit(e)}>
              <input
                className="form-control form-control--path"
                type="text"
                placeholder="Choose or create new path"
                ref={node => (this.pathInput = node)}
                onKeyDown={e => this.onKeyDown(e)}
                value={this.state.path}
                onChange={e => this.setState({ path: e.target.value })}
              />
            </form>
          </ClickOutHandler>
        ) : (
          <React.Fragment>
            <span className="main">
              <a href="" onClick={e => this.onPrefixClick(e, "")}>
                {currentBucket}
              </a>
            </span>
            {path}
            <OverlayTrigger placement="bottom" overlay={pathTooltip}>
              <a href="" onClick={e => this.onEditClick(e)} className="fe-edit">
                <i className="fas fa-folder-plus" />
              </a>
            </OverlayTrigger>
          </React.Fragment>
        )}
      </h2>
    )
  }
}

const mapStateToProps = state => {
  return {
    buckets: state.buckets.list,
    currentBucket: getCurrentBucket(state),
    currentPrefix: state.objects.currentPrefix
  }
@@ -63,8 +163,14 @@ const mapStateToProps = state => {

const mapDispatchToProps = dispatch => {
  return {
    makeBucket: bucket => dispatch(actionsBuckets.makeBucket(bucket)),
    selectBucket: (bucket, prefix) =>
      dispatch(actionsBuckets.selectBucket(bucket, prefix)),
    selectPrefix: prefix => dispatch(actionsObjects.selectPrefix(prefix))
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(Path)
export default connect(
  mapStateToProps,
  mapDispatchToProps
)(Path)
@@ -35,7 +35,7 @@ describe("ObjectsHeader", () => {
  />
)
expect(
  wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-asc")
  wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-down")
).toBeTruthy()
})
@@ -49,7 +49,7 @@ describe("ObjectsHeader", () => {
  />
)
expect(
  wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-desc")
  wrapper.find("#sort-by-name i").hasClass("fa-sort-alpha-down-alt")
).toBeTruthy()
})
@@ -63,7 +63,7 @@ describe("ObjectsHeader", () => {
  />
)
expect(
  wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-asc")
  wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-down-alt")
).toBeTruthy()
})
@@ -77,7 +77,7 @@ describe("ObjectsHeader", () => {
  />
)
expect(
  wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-desc")
  wrapper.find("#sort-by-size i").hasClass("fa-sort-amount-down")
).toBeTruthy()
})
@@ -91,7 +91,7 @@ describe("ObjectsHeader", () => {
  />
)
expect(
  wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-asc")
  wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-down")
).toBeTruthy()
})
@@ -105,7 +105,7 @@ describe("ObjectsHeader", () => {
  />
)
expect(
  wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-desc")
  wrapper.find("#sort-by-last-modified i").hasClass("fa-sort-numeric-down-alt")
).toBeTruthy()
})
@@ -15,7 +15,7 @@
 */

import React from "react"
import { shallow } from "enzyme"
import { shallow, mount } from "enzyme"
import { Path } from "../Path"

describe("Path", () => {
@@ -26,7 +26,12 @@ describe("Path", () => {
  it("should render only bucket if there is no prefix", () => {
    const wrapper = shallow(<Path currentBucket={"test1"} currentPrefix={""} />)
    expect(wrapper.find("span").length).toBe(1)
    expect(wrapper.text()).toBe("test1")
    expect(
      wrapper
        .find("span")
        .at(0)
        .text()
    ).toBe("test1")
  })

  it("should render bucket and prefix", () => {
@@ -69,4 +74,70 @@ describe("Path", () => {
      .simulate("click", { preventDefault: jest.fn() })
    expect(selectPrefix).toHaveBeenCalledWith("a/b/")
  })

  it("should switch to input mode when edit icon is clicked", () => {
    const wrapper = mount(<Path currentBucket={"test1"} currentPrefix={""} />)
    wrapper.find(".fe-edit").simulate("click", { preventDefault: jest.fn() })
    expect(wrapper.find(".form-control--path").exists()).toBeTruthy()
  })

  it("should navigate to prefix when user types path for existing bucket", () => {
    const selectBucket = jest.fn()
    const buckets = ["test1", "test2"]
    const wrapper = mount(
      <Path
        buckets={buckets}
        currentBucket={"test1"}
        currentPrefix={""}
        selectBucket={selectBucket}
      />
    )
    wrapper.setState({
      isEditing: true,
      path: "test2/dir1/"
    })
    wrapper.find("form").simulate("submit", { preventDefault: jest.fn() })
    expect(selectBucket).toHaveBeenCalledWith("test2", "dir1/")
  })

  it("should create a new bucket if bucket typed in path doesn't exist", () => {
    const makeBucket = jest.fn()
    const buckets = ["test1", "test2"]
    const wrapper = mount(
      <Path
        buckets={buckets}
        currentBucket={"test1"}
        currentPrefix={""}
        makeBucket={makeBucket}
      />
    )
    wrapper.setState({
      isEditing: true,
      path: "test3/dir1/"
    })
    wrapper.find("form").simulate("submit", { preventDefault: jest.fn() })
    expect(makeBucket).toHaveBeenCalledWith("test3")
  })

  it("should not make or select bucket if path doesn't point to bucket", () => {
    const makeBucket = jest.fn()
    const selectBucket = jest.fn()
    const buckets = ["test1", "test2"]
    const wrapper = mount(
      <Path
        buckets={buckets}
        currentBucket={"test1"}
        currentPrefix={""}
        makeBucket={makeBucket}
        selectBucket={selectBucket}
      />
    )
    wrapper.setState({
      isEditing: true,
      path: "//dir1/dir2/"
    })
    wrapper.find("form").simulate("submit", { preventDefault: jest.fn() })
    expect(makeBucket).not.toHaveBeenCalled()
    expect(selectBucket).not.toHaveBeenCalled()
  })
})
@@ -33,12 +33,12 @@ export class AbortConfirmModal extends React.Component {
  "abort-upload": true
})
let okIcon = classNames({
  fa: true,
  fas: true,
  "fa-times": true
})
let cancelIcon = classNames({
  fa: true,
  "fa-cloud-upload": true
  fas: true,
  "fa-cloud-upload-alt": true
})

return (
@@ -46,7 +46,7 @@ export class AbortConfirmModal extends React.Component {
  show={true}
  baseClass={baseClass}
  text="Abort uploads in progress?"
  icon="fa fa-info-circle mci-amber"
  icon="fas fa-info-circle mci-amber"
  sub="This cannot be undone!"
  okText="Abort"
  okIcon={okIcon}
@@ -8,6 +8,13 @@
  background-color: transparent;
}

.form-control--path {
  color: @link-color;
  padding: 5px 5px 6px 0;
  font-size: 16px;
  .placeholder(@text-muted-color)
}

select.form-control {
  -webkit-appearance: none;
  -moz-appearance: none;
@@ -153,7 +160,7 @@ select.form-control {
  }
  &:after {
    content: "\f05a";
    font-family: FontAwesome;
    font-family: 'Font Awesome 5 Free';
    position: absolute;
    top: 17px;
    right: 9px;
@@ -165,6 +172,7 @@ select.form-control {
.ig-search {
  &:before {
    font-family: @font-family-icon;
    font-weight: 900;
    content: '\f002';
    font-size: 15px;
    position: absolute;
@@ -16,6 +16,10 @@
font-weight: normal;
margin: 0;

@media(min-width: (@screen-md-min)) {
  width: calc(100% - 60px);
}

& > span {
  margin-bottom: 7px;
  display: inline-block;
@@ -27,7 +31,7 @@
    color: @text-color;
  }
}
&:last-child {
&:last-of-type {
  &:after {
    content: '/';
    margin: 0 4px;
@@ -43,6 +47,19 @@
}

/*--------------------------
    Edit path
----------------------------*/
.fe-edit {
  font-size: 20px;
  color: @link-color;
  margin-left: 4px;

  i {
    vertical-align: middle;
  }
}

/*--------------------------
    Disk used
----------------------------*/
@@ -132,7 +149,7 @@
@media(max-width: (@screen-sm-max)) {
  background: url(../../img/more-h-light.svg) no-repeat center;

  .fa-reorder {
  .fa-bars {
    display: none;
  }
}
@@ -113,7 +113,7 @@ div.fesl-row {
    Icons
----------------------------*/
&[data-type=folder] {
  .list-type(#a1d6dd, '\f114');
  .list-type(#a1d6dd, '\f07b');

  .fesl-item-name {
    a {
@@ -128,8 +128,8 @@ div.fesl-row {
&[data-type=excel] { .list-type(#64c866, '\f1c3'); }
&[data-type=image] { .list-type(#f06292, '\f1c5'); }
&[data-type=video] { .list-type(#f8c363, '\f1c8'); }
&[data-type=other] { .list-type(#afafaf, '\f016'); }
&[data-type=text] { .list-type(#8a8a8a, '\f0f6'); }
&[data-type=other] { .list-type(#afafaf, '\f15b'); }
&[data-type=text] { .list-type(#8a8a8a, '\f15c'); }
&[data-type=doc] { .list-type(#2196f5, '\f1c2'); }
&[data-type=presentation] { .list-type(#896ea6, '\f1c4'); }
@@ -249,6 +249,7 @@ div.fesl-row {

&:after {
  font-family: @font-family-icon;
  font-weight: 900;
  content: '\f00c';
  top: 8px;
  left: 9px;
@@ -449,7 +450,7 @@ div.fesl-row {
float: left;
padding: 4px 0;

.fa {
.fas {
  font-size: 22px;
  vertical-align: top;
  margin-right: 10px;
@@ -76,7 +76,7 @@
word-wrap: break-word;

&:before {
  font-family: FontAwesome;
  font-family: 'Font Awesome 5 Free';
  content: '\f0a0';
  font-size: 17px;
  position: absolute;
@@ -2,7 +2,7 @@
    Base
----------------------------*/
@font-family-sans-serif : 'Lato', sans-serif;
@font-family-icon : 'fontAwesome';
@font-family-icon : 'Font Awesome 5 Free';
@body-bg : #edecec;
@text-color : #8e8e8e;
@font-size-base : 15px;
230
browser/package-lock.json
generated
@@ -82,6 +82,11 @@
         }
       }
     },
+    "@fortawesome/fontawesome-free": {
+      "version": "5.10.0",
+      "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-free/-/fontawesome-free-5.10.0.tgz",
+      "integrity": "sha512-XX16koDMY/tkJmec0VFfKF7RYZOze/203B1iyLnRaAySm3ZPhKaeyIpf73Yh8xhrMk3Fj4TeH3FC01qyFTyg8g=="
+    },
     "@types/node": {
       "version": "12.0.7",
       "resolved": "https://registry.npmjs.org/@types/node/-/node-12.0.7.tgz",
@@ -891,6 +896,14 @@
         "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
       "regenerator-runtime": {
@@ -925,6 +938,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
       "regenerator-runtime": {
@@ -954,6 +975,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -996,6 +1025,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1068,6 +1105,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1216,6 +1261,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1332,6 +1385,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1535,6 +1596,14 @@
          "babel-runtime": "^6.22.0",
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.10.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "babel-preset-es2015": {
@@ -1648,6 +1717,13 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.10.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A=="
+          }
+        }
       },
      "babel-template": {
@@ -1671,6 +1747,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1706,6 +1790,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1736,6 +1828,14 @@
        "requires": {
          "core-js": "^2.4.0",
          "regenerator-runtime": "^0.11.0"
-        }
+        },
+        "dependencies": {
+          "core-js": {
+            "version": "2.6.9",
+            "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.9.tgz",
+            "integrity": "sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==",
+            "dev": true
+          }
+        }
       },
      "regenerator-runtime": {
@@ -1884,9 +1984,9 @@
      "dev": true
    },
    "bootstrap": {
-      "version": "3.3.7",
-      "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-3.3.7.tgz",
-      "integrity": "sha1-WjiTlFSfIzMIdaOxUGVldPip63E="
+      "version": "3.4.1",
+      "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-3.4.1.tgz",
+      "integrity": "sha512-yN5oZVmRCwe5aKwzRj6736nSmKDX7pLYwsXiCj/EYmo16hODaBiT4En5btW/jhBF/seV+XMx3aYwukYC3A49DA=="
    },
    "brace-expansion": {
      "version": "1.1.11",
@@ -2878,9 +2978,9 @@
      }
    },
    "core-js": {
-      "version": "2.4.1",
-      "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.4.1.tgz",
-      "integrity": "sha1-TekR5mew6ukSTjQlS1OupvxhjT4="
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.2.1.tgz",
+      "integrity": "sha512-Qa5XSVefSVPRxy2XfUC13WbvqkxhkwB3ve+pgCQveNgYzbM/UxZeu1dcOX/xr4UmfUd+muuvsaxilQzCyUurMw=="
    },
    "core-util-is": {
      "version": "1.0.2",
@@ -4571,11 +4671,6 @@
        }
      }
    },
-    "font-awesome": {
-      "version": "4.7.0",
-      "resolved": "https://registry.npmjs.org/font-awesome/-/font-awesome-4.7.0.tgz",
-      "integrity": "sha1-j6jPBBGhoxr9B7BtKQK7n8gVoTM="
-    },
    "for-in": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
@@ -5326,9 +5421,9 @@
      "dev": true
    },
    "handlebars": {
-      "version": "4.1.2",
-      "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.1.2.tgz",
-      "integrity": "sha512-nvfrjqvt9xQ8Z/w0ijewdD/vvWDTOweBUm96NTr66Wfvo1mJenBLwcYmPs3TIBP5ruzYGD7Hx/DaM9RmhroGPw==",
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.4.0.tgz",
+      "integrity": "sha512-xkRtOt3/3DzTKMOt3xahj2M/EqNhY988T+imYSlMgs5fVhLN2fmKVVj0LtEGmb+3UUYV5Qmm1052Mm3dIQxOvw==",
      "dev": true,
      "requires": {
        "neo-async": "^2.6.0",
@@ -5817,12 +5912,6 @@
      "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==",
      "dev": true
    },
-    "lodash": {
-      "version": "4.17.11",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
-      "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==",
-      "dev": true
-    },
    "micromatch": {
      "version": "3.1.10",
      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
@@ -6408,12 +6497,6 @@
          "requires": {
            "lodash": "^4.17.11"
          }
        },
-        "lodash": {
-          "version": "4.17.11",
-          "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
-          "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==",
-          "dev": true
-        }
      }
    },
@@ -7352,13 +7435,21 @@
      "integrity": "sha1-COnxMkhKLEWjCQfp3E1VZ7fxFNc="
    },
    "js-yaml": {
-      "version": "3.7.0",
-      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.7.0.tgz",
-      "integrity": "sha1-XJZ93YN6m/3KXy3oQlOr6KHAO4A=",
+      "version": "3.13.1",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz",
+      "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==",
      "dev": true,
      "requires": {
        "argparse": "^1.0.7",
-        "esprima": "^2.6.0"
+        "esprima": "^4.0.0"
+      },
+      "dependencies": {
+        "esprima": {
+          "version": "4.0.1",
+          "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+          "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+          "dev": true
+        }
      }
    },
    "jsbn": {
@@ -7645,14 +7736,14 @@
      }
    },
    "lodash": {
-      "version": "4.17.11",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
-      "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg=="
+      "version": "4.17.14",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.14.tgz",
+      "integrity": "sha512-mmKYbW3GLuJeX+iGP+Y7Gp1AiGHGbXHCOh/jZmrawMmsE7MS4znI3RL2FsjbqOyMayHInjOeykW7PEajUk1/xw=="
    },
    "lodash-es": {
-      "version": "4.17.11",
-      "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.11.tgz",
-      "integrity": "sha512-DHb1ub+rMjjrxqlB3H56/6MXtm1lSksDp2rA2cNWjG8mlDUYFhUj3Di2Zn5IwSU87xLv8tNIQ7sSwE/YOX/D/Q=="
+      "version": "4.17.14",
+      "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.14.tgz",
+      "integrity": "sha512-7zchRrGa8UZXjD/4ivUWP1867jDkhzTG2c/uj739utSd7O/pFFdxspCemIFKEEjErbcqRzn8nKnGsi7mvTgRPA=="
    },
    "lodash._baseisequal": {
      "version": "3.0.7",
@@ -8093,9 +8184,9 @@
      }
    },
    "mixin-deep": {
-      "version": "1.3.1",
-      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz",
-      "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==",
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+      "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
      "requires": {
        "for-in": "^1.0.2",
        "is-extendable": "^1.0.1"
@@ -10415,9 +10506,9 @@
      "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA=="
    },
    "react-onclickout": {
-      "version": "2.0.4",
-      "resolved": "https://registry.npmjs.org/react-onclickout/-/react-onclickout-2.0.4.tgz",
-      "integrity": "sha1-LHU5pkfh3NyrCyji9Orjw+APDGQ="
+      "version": "2.0.8",
+      "resolved": "https://registry.npmjs.org/react-onclickout/-/react-onclickout-2.0.8.tgz",
+      "integrity": "sha1-0XixP7h6SBNWdhtFSqYN9wabLaQ="
    },
    "react-overlays": {
      "version": "0.8.3",
@@ -11078,14 +11169,6 @@
      "dev": true,
      "requires": {
        "lodash": "^4.17.11"
-      },
-      "dependencies": {
-        "lodash": {
-          "version": "4.17.11",
-          "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
-          "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==",
-          "dev": true
-        }
-      }
+      }
    },
    "request-promise-native": {
@@ -11668,9 +11751,9 @@
      "dev": true
    },
    "set-value": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz",
-      "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==",
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+      "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
      "requires": {
        "extend-shallow": "^2.0.1",
        "is-extendable": "^0.1.1",
@@ -12402,6 +12485,18 @@
        "mkdirp": "~0.5.1",
        "sax": "~1.2.1",
        "whet.extend": "~0.9.9"
-      }
+      },
+      "dependencies": {
+        "js-yaml": {
+          "version": "3.7.0",
+          "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.7.0.tgz",
+          "integrity": "sha1-XJZ93YN6m/3KXy3oQlOr6KHAO4A=",
+          "dev": true,
+          "requires": {
+            "argparse": "^1.0.7",
+            "esprima": "^2.6.0"
+          }
+        }
+      }
    },
    "symbol-observable": {
@@ -12937,35 +13032,14 @@
      }
    },
    "union-value": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz",
-      "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=",
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+      "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
      "requires": {
        "arr-union": "^3.1.0",
        "get-value": "^2.0.6",
        "is-extendable": "^0.1.1",
-        "set-value": "^0.4.3"
-      },
-      "dependencies": {
-        "extend-shallow": {
-          "version": "2.0.1",
-          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
-          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
-          "requires": {
-            "is-extendable": "^0.1.0"
-          }
-        },
-        "set-value": {
-          "version": "0.4.3",
-          "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz",
-          "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=",
-          "requires": {
-            "extend-shallow": "^2.0.1",
-            "is-extendable": "^0.1.1",
-            "is-plain-object": "^2.0.1",
-            "to-object-path": "^0.3.0"
-          }
-        }
+        "set-value": "^2.0.1"
      }
    },
    "uniq": {

@@ -59,10 +59,11 @@
    "webpack-dev-server": "^3.1.14"
  },
  "dependencies": {
-    "bootstrap": "^3.3.6",
+    "@fortawesome/fontawesome-free": "^5.10.0",
+    "bootstrap": "^3.4.1",
    "classnames": "^2.2.3",
+    "core-js": "^3.2.1",
    "expect": "^1.20.2",
-    "font-awesome": "^4.7.0",
    "glob-all": "^3.1.0",
    "history": "^4.7.2",
    "humanize": "0.0.9",
@@ -82,7 +83,7 @@
    "react-dom": "^16.2.0",
    "react-dropzone": "^4.2.3",
    "react-infinite-scroller": "^1.0.6",
-    "react-onclickout": "2.0.4",
+    "react-onclickout": "^2.0.8",
    "react-redux": "^5.0.6",
    "react-router-dom": "^4.2.0",
    "redux": "^3.7.2",
File diff suppressed because one or more lines are too long

9011
browser/yarn.lock
File diff suppressed because it is too large
@@ -21,7 +21,7 @@ _init() {
 
     ## Minimum required versions for build dependencies
     GIT_VERSION="1.0"
-    GO_VERSION="1.12"
+    GO_VERSION="1.13"
     OSX_VERSION="10.8"
     KNAME=$(uname -s)
     ARCH=$(uname -m)
@@ -119,7 +119,7 @@ assert_is_supported_os() {
 
 assert_check_golang_env() {
     if ! which go >/dev/null 2>&1; then
-        echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://docs.min.io/docs/how-to-install-golang"
+        echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://golang.org/doc/install"
         exit 1
     fi

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@ import (
 	"strings"
 	"sync"
 	"testing"
+	"time"
 
 	"github.com/gorilla/mux"
 	"github.com/minio/minio/pkg/auth"
@@ -83,7 +84,9 @@ var (
 			"durable": false,
 			"internal": false,
 			"noWait": false,
-			"autoDeleted": false
+			"autoDeleted": false,
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	},
 	"elasticsearch": {
@@ -91,7 +94,9 @@ var (
 			"enable": false,
 			"format": "namespace",
 			"url": "",
-			"index": ""
+			"index": "",
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	},
 	"kafka": {
@@ -137,7 +142,9 @@ var (
 			"port": "",
 			"user": "",
 			"password": "",
-			"database": ""
+			"database": "",
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	},
 	"nats": {
@@ -150,6 +157,8 @@ var (
 			"token": "",
 			"secure": false,
 			"pingInterval": 0,
+			"queueDir": "",
+			"queueLimit": 0,
 			"streaming": {
 				"enable": false,
 				"clusterID": "",
@@ -166,7 +175,9 @@ var (
 			"tls": {
 				"enable": false,
 				"skipVerify": false
-			}
+			},
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	},
 	"postgresql": {
@@ -179,7 +190,9 @@ var (
 			"port": "",
 			"user": "",
 			"password": "",
-			"database": ""
+			"database": "",
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	},
 	"redis": {
@@ -188,13 +201,17 @@ var (
 			"format": "namespace",
 			"address": "",
 			"password": "",
-			"key": ""
+			"key": "",
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	},
 	"webhook": {
 		"1": {
 			"enable": false,
-			"endpoint": ""
+			"endpoint": "",
+			"queueDir": "",
+			"queueLimit": 0
 		}
 	}
 },
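Every notification target in the test config above gains the same pair of fields, queueDir and queueLimit, for persisting undelivered events. A minimal sketch in Go of how one such target config could be modeled, assuming the struct fields map one-to-one onto the JSON keys shown above (the struct name is hypothetical, used only for illustration):

    // Sketch: a webhook target config carrying the new queue knobs.
    // Field names mirror the JSON keys in the test fixture above.
    type WebhookTargetConfig struct {
    	Enable     bool   `json:"enable"`
    	Endpoint   string `json:"endpoint"`
    	QueueDir   string `json:"queueDir"`   // directory used to persist undelivered events
    	QueueLimit uint64 `json:"queueLimit"` // 0 leaves the limit at an implementation-defined default
    }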
@@ -277,11 +294,16 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
 	globalIAMSys = NewIAMSys()
 	globalIAMSys.Init(objLayer)
 
+	buckets, err := objLayer.ListBuckets(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
 	globalPolicySys = NewPolicySys()
-	globalPolicySys.Init(objLayer)
+	globalPolicySys.Init(buckets, objLayer)
 
 	globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
-	globalNotificationSys.Init(objLayer)
+	globalNotificationSys.Init(buckets, objLayer)
 
 	// Setup admin mgmt REST API handlers.
 	adminRouter := mux.NewRouter()
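The hunk above changes the initialization order: the bucket list is fetched once and handed to both subsystems, instead of each Init call rediscovering it. A minimal sketch of the pattern, reusing the globals and types from the hunk (the helper function itself is hypothetical):

    // Sketch: list buckets once, then fan the result out to every
    // subsystem that needs it, as the Init calls above now do.
    func initSubsystems(ctx context.Context, objLayer ObjectLayer) error {
    	buckets, err := objLayer.ListBuckets(ctx)
    	if err != nil {
    		return err
    	}
    	globalPolicySys.Init(buckets, objLayer)
    	globalNotificationSys.Init(buckets, objLayer)
    	return nil
    }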
@@ -328,117 +350,35 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
 	return objLayer, xlDirs, nil
 }
 
-func TestAdminVersionHandler(t *testing.T) {
-	adminTestBed, err := prepareAdminXLTestBed()
-	if err != nil {
-		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
-	}
-	defer adminTestBed.TearDown()
-
-	req, err := newTestRequest("GET", "/minio/admin/version", 0, nil)
-	if err != nil {
-		t.Fatalf("Failed to construct request - %v", err)
-	}
-	cred := globalServerConfig.GetCredential()
-	err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
-	if err != nil {
-		t.Fatalf("Failed to sign request - %v", err)
-	}
-
-	rec := httptest.NewRecorder()
-	adminTestBed.router.ServeHTTP(rec, req)
-	if http.StatusOK != rec.Code {
-		t.Errorf("Unexpected status code - got %d but expected %d",
-			rec.Code, http.StatusOK)
-	}
-
-	var result madmin.AdminAPIVersionInfo
-	err = json.NewDecoder(rec.Body).Decode(&result)
-	if err != nil {
-		t.Errorf("json parse err: %v", err)
-	}
-
-	if result != adminAPIVersionInfo {
-		t.Errorf("unexpected version: %v", result)
-	}
-}
-
 // cmdType - Represents different service subcomands like status, stop
 // and restart.
 type cmdType int
 
 const (
-	statusCmd cmdType = iota
-	restartCmd
+	restartCmd cmdType = iota
 	stopCmd
 	setCreds
 )
 
-// String - String representation for cmdType
-func (c cmdType) String() string {
-	switch c {
-	case statusCmd:
-		return "status"
-	case restartCmd:
-		return "restart"
-	case stopCmd:
-		return "stop"
-	case setCreds:
-		return "set-credentials"
-	}
-	return ""
-}
-
-// apiMethod - Returns the HTTP method corresponding to the admin REST
-// API for a given cmdType value.
-func (c cmdType) apiMethod() string {
-	switch c {
-	case statusCmd:
-		return "GET"
-	case restartCmd:
-		return "POST"
-	case stopCmd:
-		return "POST"
-	case setCreds:
-		return "PUT"
-	}
-	return "GET"
-}
-
-// apiEndpoint - Return endpoint for each admin REST API mapped to a
-// command here.
-func (c cmdType) apiEndpoint() string {
-	switch c {
-	case statusCmd, restartCmd, stopCmd:
-		return "/minio/admin/v1/service"
-	case setCreds:
-		return "/minio/admin/v1/config/credential"
-	}
-	return ""
-}
-
 // toServiceSignal - Helper function that translates a given cmdType
 // value to its corresponding serviceSignal value.
 func (c cmdType) toServiceSignal() serviceSignal {
 	switch c {
-	case statusCmd:
-		return serviceStatus
 	case restartCmd:
 		return serviceRestart
 	case stopCmd:
 		return serviceStop
 	}
-	return serviceStatus
+	return serviceRestart
 }
 
-func (c cmdType) toServiceActionValue() madmin.ServiceActionValue {
+func (c cmdType) toServiceAction() madmin.ServiceAction {
 	switch c {
 	case restartCmd:
-		return madmin.ServiceActionValueRestart
+		return madmin.ServiceActionRestart
 	case stopCmd:
-		return madmin.ServiceActionValueStop
+		return madmin.ServiceActionStop
 	}
-	return madmin.ServiceActionValueStop
+	return madmin.ServiceActionRestart
 }
 
 // testServiceSignalReceiver - Helper function that simulates a
@@ -453,19 +393,15 @@ func testServiceSignalReceiver(cmd cmdType, t *testing.T) {
 
 // getServiceCmdRequest - Constructs a management REST API request for service
 // subcommands for a given cmdType value.
-func getServiceCmdRequest(cmd cmdType, cred auth.Credentials, body []byte) (*http.Request, error) {
-	req, err := newTestRequest(cmd.apiMethod(), cmd.apiEndpoint(), 0, nil)
+func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) {
+	queryVal := url.Values{}
+	queryVal.Set("action", string(cmd.toServiceAction()))
+	resource := "/minio/admin/v1/service?" + queryVal.Encode()
+	req, err := newTestRequest(http.MethodPost, resource, 0, nil)
 	if err != nil {
 		return nil, err
 	}
 
-	// Set body
-	req.Body = ioutil.NopCloser(bytes.NewReader(body))
-	req.ContentLength = int64(len(body))
-
-	// Set sha-sum header
-	req.Header.Set("X-Amz-Content-Sha256", getSHA256Hash(body))
-
 	// management REST API uses signature V4 for authentication.
 	err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
 	if err != nil {
@@ -501,13 +437,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	}
 	credentials := globalServerConfig.GetCredential()
 
-	body, err := json.Marshal(madmin.ServiceAction{
-		Action: cmd.toServiceActionValue()})
-	if err != nil {
-		t.Fatalf("JSONify error: %v", err)
-	}
-
-	req, err := getServiceCmdRequest(cmd, credentials, body)
+	req, err := getServiceCmdRequest(cmd, credentials)
 	if err != nil {
 		t.Fatalf("Failed to build service status request %v", err)
 	}
@@ -515,19 +445,6 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	rec := httptest.NewRecorder()
 	adminTestBed.router.ServeHTTP(rec, req)
 
-	if cmd == statusCmd {
-		expectedInfo := madmin.ServiceStatus{
-			ServerVersion: madmin.ServerVersion{Version: Version, CommitID: CommitID},
-		}
-		receivedInfo := madmin.ServiceStatus{}
-		if jsonErr := json.Unmarshal(rec.Body.Bytes(), &receivedInfo); jsonErr != nil {
-			t.Errorf("Failed to unmarshal StorageInfo - %v", jsonErr)
-		}
-		if expectedInfo.ServerVersion != receivedInfo.ServerVersion {
-			t.Errorf("Expected storage info and received storage info differ, %v %v", expectedInfo, receivedInfo)
-		}
-	}
-
 	if rec.Code != http.StatusOK {
 		resp, _ := ioutil.ReadAll(rec.Body)
 		t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
@@ -538,11 +455,6 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	wg.Wait()
 }
 
-// Test for service status management REST API.
-func TestServiceStatusHandler(t *testing.T) {
-	testServicesCmdHandler(statusCmd, t)
-}
-
 // Test for service restart management REST API.
 func TestServiceRestartHandler(t *testing.T) {
 	testServicesCmdHandler(restartCmd, t)
@@ -704,9 +616,6 @@ func TestAdminServerInfo(t *testing.T) {
 	}
 
 	for _, serverInfo := range results {
-		if len(serverInfo.Addr) == 0 {
-			t.Error("Expected server address to be non empty")
-		}
 		if serverInfo.Error != "" {
 			t.Errorf("Unexpected error = %v\n", serverInfo.Error)
 		}
@@ -747,3 +656,100 @@ func TestToAdminAPIErrCode(t *testing.T) {
 	}
 }
 }
 
+func TestTopLockEntries(t *testing.T) {
+	t1 := UTCNow()
+	t2 := UTCNow().Add(10 * time.Second)
+	peerLocks := []*PeerLocks{
+		{
+			Addr: "1",
+			Locks: map[string][]lockRequesterInfo{
+				"1": {
+					{false, "node2", "ep2", "2", t2, t2, ""},
+					{true, "node1", "ep1", "1", t1, t1, ""},
+				},
+				"2": {
+					{false, "node2", "ep2", "2", t2, t2, ""},
+					{true, "node1", "ep1", "1", t1, t1, ""},
+				},
+			},
+		},
+		{
+			Addr: "2",
+			Locks: map[string][]lockRequesterInfo{
+				"1": {
+					{false, "node2", "ep2", "2", t2, t2, ""},
+					{true, "node1", "ep1", "1", t1, t1, ""},
+				},
+				"2": {
+					{false, "node2", "ep2", "2", t2, t2, ""},
+					{true, "node1", "ep1", "1", t1, t1, ""},
+				},
+			},
+		},
+	}
+	les := topLockEntries(peerLocks)
+	if len(les) != 2 {
+		t.Fatalf("Did not get 2 results")
+	}
+	if les[0].Timestamp.After(les[1].Timestamp) {
+		t.Fatalf("Got wrong sorted value")
+	}
+}
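The test above only pins down the observable contract of topLockEntries: locks reported by all peers are merged per resource, and entries come back oldest-first. A sketch of one implementation that would satisfy these assertions — the real topLockEntries may differ, and the lockEntry shape and Timestamp field are assumed from the test, not taken from its source:

    // Sketch (assumed behaviour): keep the oldest lock timestamp per
    // resource across all peers, then sort ascending by timestamp.
    func topLockEntriesSketch(peerLocks []*PeerLocks) []lockEntry {
    	oldest := make(map[string]time.Time)
    	for _, peer := range peerLocks {
    		for resource, lris := range peer.Locks {
    			for _, lri := range lris {
    				if ts, ok := oldest[resource]; !ok || lri.Timestamp.Before(ts) {
    					oldest[resource] = lri.Timestamp
    				}
    			}
    		}
    	}
    	entries := make([]lockEntry, 0, len(oldest))
    	for resource, ts := range oldest {
    		entries = append(entries, lockEntry{Resource: resource, Timestamp: ts})
    	}
    	sort.Slice(entries, func(i, j int) bool {
    		return entries[i].Timestamp.Before(entries[j].Timestamp)
    	})
    	return entries
    }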
+
+func TestExtractHealInitParams(t *testing.T) {
+	mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
+		v := url.Values{}
+		if clientToken != "" {
+			v.Add(string(mgmtClientToken), clientToken)
+		}
+		if forceStart {
+			v.Add(string(mgmtForceStart), "")
+		}
+		if forceStop {
+			v.Add(string(mgmtForceStop), "")
+		}
+		return v
+	}
+	qParmsArr := []url.Values{
+		// Invalid cases
+		mkParams("", true, true),
+		mkParams("111", true, true),
+		mkParams("111", true, false),
+		mkParams("111", false, true),
+		// Valid cases follow
+		mkParams("", true, false),
+		mkParams("", false, true),
+		mkParams("", false, false),
+		mkParams("111", false, false),
+	}
+	varsArr := []map[string]string{
+		// Invalid cases
+		{string(mgmtPrefix): "objprefix"},
+		// Valid cases
+		{},
+		{string(mgmtBucket): "bucket"},
+		{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
+	}
+
+	// Body is always valid - we do not test JSON decoding.
+	body := `{"recursive": false, "dryRun": true, "remove": false, "scanMode": 0}`
+
+	// Test all combinations!
+	for pIdx, parms := range qParmsArr {
+		for vIdx, vars := range varsArr {
+			_, err := extractHealInitParams(vars, parms, bytes.NewBuffer([]byte(body)))
+			isErrCase := false
+			if pIdx < 4 || vIdx < 1 {
+				isErrCase = true
+			}
+
+			if err != ErrNone && !isErrCase {
+				t.Errorf("Got unexpected error: %v %v %v", pIdx, vIdx, err)
+			} else if err == ErrNone && isErrCase {
+				t.Errorf("Got no error but expected one: %v %v", pIdx, vIdx)
+			}
+		}
+	}
+
+}

@@ -587,9 +587,9 @@ func (h *healSequence) healItemsFromSourceCh() error {
 
 		var itemType madmin.HealItemType
 		switch {
-		case path == "/":
+		case path == SlashSeparator:
 			itemType = madmin.HealItemMetadata
-		case !strings.Contains(path, "/"):
+		case !strings.Contains(path, SlashSeparator):
 			itemType = madmin.HealItemBucket
 		default:
 			itemType = madmin.HealItemObject
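The switch above derives the heal item type purely from the shape of the path. For reference, the same rule as a stand-alone helper — a sketch, not the actual MinIO function:

    // Sketch: classify a heal path exactly as the switch above does.
    // "/" heals backend metadata, a bare "bucket" heals a bucket, and
    // anything containing a separator ("bucket/object") heals an object.
    func healItemTypeSketch(path string) madmin.HealItemType {
    	switch {
    	case path == SlashSeparator:
    		return madmin.HealItemMetadata
    	case !strings.Contains(path, SlashSeparator):
    		return madmin.HealItemBucket
    	default:
    		return madmin.HealItemObject
    	}
    }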
@@ -693,7 +693,7 @@ func (h *healSequence) healDiskFormat() error {
 		return errServerNotInitialized
 	}
 
-	return h.queueHealTask("/", madmin.HealItemMetadata)
+	return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
 }
 
 // healBuckets - check for all buckets heal or just particular bucket.

@@ -38,17 +38,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 	adminRouter := router.PathPrefix(adminAPIPathPrefix).Subrouter()
 
-	// Version handler
-	adminRouter.Methods(http.MethodGet).Path("/version").HandlerFunc(httpTraceAll(adminAPI.VersionHandler))
-
 	adminV1Router := adminRouter.PathPrefix("/v1").Subrouter()
 
 	/// Service operations
 
-	// Service status
-	adminV1Router.Methods(http.MethodGet).Path("/service").HandlerFunc(httpTraceAll(adminAPI.ServiceStatusHandler))
-
-	// Service restart and stop - TODO
-	adminV1Router.Methods(http.MethodPost).Path("/service").HandlerFunc(httpTraceAll(adminAPI.ServiceStopNRestartHandler))
+	// Restart and stop MinIO service.
+	adminV1Router.Methods(http.MethodPost).Path("/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
+	// Update MinIO servers.
+	adminV1Router.Methods(http.MethodPost).Path("/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
 
 	// Info operations
 	adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
@@ -80,11 +77,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 		adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
 		// Set config
 		adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
-
-		// Get config keys/values
-		adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
-		// Set config keys/values
-		adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
 	}
 
 	if enableIAMOps {
@@ -96,20 +88,41 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 
 		// Add user IAM
 		adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
 		adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
 			Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
 		adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
 			Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
 
 		// Info policy IAM
 		adminV1Router.Methods(http.MethodGet).Path("/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
 
 		// Remove policy IAM
 		adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
 
+		// Set user or group policy
+		adminV1Router.Methods(http.MethodPut).Path("/set-user-or-group-policy").
+			HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
+			Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
+
 		// Remove user IAM
 		adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
 
 		// List users
 		adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
 
+		// User info
+		adminV1Router.Methods(http.MethodGet).Path("/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
+
+		// Add/Remove members from group
+		adminV1Router.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
+
+		// Get Group
+		adminV1Router.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
+
+		// List Groups
+		adminV1Router.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
+
+		// Set Group Status
+		adminV1Router.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
+
 		// List policies
 		adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
 	}
@@ -120,6 +133,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 
 	// HTTP Trace
 	adminV1Router.Methods(http.MethodGet).Path("/trace").HandlerFunc(adminAPI.TraceHandler)
+
+	// Console Logs
+	adminV1Router.Methods(http.MethodGet).Path("/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
+
+	// -- KMS APIs --
+	//
+	adminV1Router.Methods(http.MethodGet).Path("/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
+
 	// If none of the routes match, return error.
 	adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
 }
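Because the service route above is registered with .Queries("action", "{action:.*}"), a POST to /minio/admin/v1/service only reaches ServiceActionHandler when an action query parameter is present. A self-contained sketch of that matching behaviour with gorilla/mux (the handler body and port are made up for illustration):

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/gorilla/mux"
    )

    func main() {
    	r := mux.NewRouter()
    	// Matches POST /service?action=restart (any action value),
    	// but not a bare POST /service without the query parameter.
    	r.Methods(http.MethodPost).Path("/service").
    		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
    			fmt.Fprintf(w, "action=%s\n", req.URL.Query().Get("action"))
    		}).
    		Queries("action", "{action:.*}")
    	http.ListenAndServe(":8080", r)
    }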

114
cmd/admin-server-info.go
Normal file
@@ -0,0 +1,114 @@
+/*
+ * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"net/http"
+	"os"
+
+	"github.com/minio/minio-go/pkg/set"
+	"github.com/minio/minio/pkg/cpu"
+	"github.com/minio/minio/pkg/disk"
+	"github.com/minio/minio/pkg/madmin"
+	"github.com/minio/minio/pkg/mem"
+)
+
+// getLocalMemUsage - returns ServerMemUsageInfo for only the
+// local endpoints from given list of endpoints
+func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInfo {
+	var memUsages []mem.Usage
+	var historicUsages []mem.Usage
+	seenHosts := set.NewStringSet()
+	for _, endpoint := range endpoints {
+		if seenHosts.Contains(endpoint.Host) {
+			continue
+		}
+		seenHosts.Add(endpoint.Host)
+
+		// Only proceed for local endpoints
+		if endpoint.IsLocal {
+			memUsages = append(memUsages, mem.GetUsage())
+			historicUsages = append(historicUsages, mem.GetHistoricUsage())
+		}
+	}
+	addr := r.Host
+	if globalIsDistXL {
+		addr = GetLocalPeer(endpoints)
+	}
+	return ServerMemUsageInfo{
+		Addr: addr,
+		Usage: memUsages,
+		HistoricUsage: historicUsages,
+	}
+}
+
+// getLocalCPULoad - returns ServerCPULoadInfo for only the
+// local endpoints from given list of endpoints
+func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo {
+	var cpuLoads []cpu.Load
+	var historicLoads []cpu.Load
+	seenHosts := set.NewStringSet()
+	for _, endpoint := range endpoints {
+		if seenHosts.Contains(endpoint.Host) {
+			continue
+		}
+		seenHosts.Add(endpoint.Host)
+
+		// Only proceed for local endpoints
+		if endpoint.IsLocal {
+			cpuLoads = append(cpuLoads, cpu.GetLoad())
+			historicLoads = append(historicLoads, cpu.GetHistoricLoad())
+		}
+	}
+	addr := r.Host
+	if globalIsDistXL {
+		addr = GetLocalPeer(endpoints)
+	}
+	return ServerCPULoadInfo{
+		Addr: addr,
+		Load: cpuLoads,
+		HistoricLoad: historicLoads,
+	}
+}
+
+// getLocalDrivesPerf - returns ServerDrivesPerfInfo for only the
+// local endpoints from given list of endpoints
+func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
+	var dps []disk.Performance
+	for _, endpoint := range endpoints {
+		// Only proceed for local endpoints
+		if endpoint.IsLocal {
+			if _, err := os.Stat(endpoint.Path); err != nil {
+				// Since this drive is not available, add relevant details and proceed
+				dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
+				continue
+			}
+			dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
+			dp.Path = endpoint.Path
+			dps = append(dps, dp)
+		}
+	}
+	addr := r.Host
+	if globalIsDistXL {
+		addr = GetLocalPeer(endpoints)
+	}
+	return madmin.ServerDrivesPerfInfo{
+		Addr: addr,
+		Perf: dps,
+		Size: size,
+	}
+}
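All three collectors in the new file repeat the same traversal: skip hosts already seen, act only on local endpoints. A sketch of that shared skeleton factored into a helper — the EndpointList and Endpoint types are the ones used above, while the helper itself is hypothetical:

    // Sketch: the endpoint walk shared by the three collectors above.
    // visit is invoked once per not-yet-seen local endpoint.
    func forEachLocalEndpoint(endpoints EndpointList, visit func(endpoint Endpoint)) {
    	seenHosts := set.NewStringSet()
    	for _, endpoint := range endpoints {
    		if seenHosts.Contains(endpoint.Host) {
    			continue
    		}
    		seenHosts.Add(endpoint.Host)
    		if endpoint.IsLocal {
    			visit(endpoint)
    		}
    	}
    }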
@@ -20,13 +20,6 @@ import (
 	"encoding/xml"
 )
 
-const (
-	// Response request id.
-	responseRequestIDKey = "x-amz-request-id"
-	// Deployment id.
-	responseDeploymentIDKey = "x-minio-deployment-id"
-)
-
 // ObjectIdentifier carries key name for the object to delete.
 type ObjectIdentifier struct {
 	ObjectName string `xml:"Key"`

@@ -92,6 +92,7 @@ const (
 	ErrMissingRequestBodyError
 	ErrNoSuchBucket
 	ErrNoSuchBucketPolicy
+	ErrNoSuchBucketLifecycle
 	ErrNoSuchKey
 	ErrNoSuchUpload
 	ErrNoSuchVersion
@@ -139,6 +140,7 @@ const (
 	ErrSlowDown
 	ErrInvalidPrefixMarker
 	ErrBadRequest
+	ErrKeyTooLongError
 	// Add new error codes here.
 
 	// SSE-S3 related API errors
@@ -187,6 +189,7 @@ const (
 	ErrRequestBodyParse
 	ErrObjectExistsAsDirectory
 	ErrInvalidObjectName
+	ErrInvalidObjectNamePrefixSlash
 	ErrInvalidResourceName
 	ErrServerNotInitialized
 	ErrOperationTimedOut
@@ -200,6 +203,8 @@ const (
 
 	ErrMalformedJSON
 	ErrAdminNoSuchUser
+	ErrAdminNoSuchGroup
+	ErrAdminGroupNotEmpty
 	ErrAdminNoSuchPolicy
 	ErrAdminInvalidArgument
 	ErrAdminInvalidAccessKey
@@ -310,6 +315,7 @@ const (
 	ErrAdminProfilerNotEnabled
 	ErrInvalidDecompressedSize
 	ErrAddUserInvalidArgument
+	ErrPostPolicyConditionInvalidFormat
 )
 
 type errorCodeMap map[APIErrorCode]APIError
@@ -465,6 +471,11 @@ var errorCodes = errorCodeMap{
 		Description: "The bucket policy does not exist",
 		HTTPStatusCode: http.StatusNotFound,
 	},
+	ErrNoSuchBucketLifecycle: {
+		Code: "NoSuchBucketLifecycle",
+		Description: "The bucket lifecycle configuration does not exist",
+		HTTPStatusCode: http.StatusNotFound,
+	},
 	ErrNoSuchKey: {
 		Code: "NoSuchKey",
 		Description: "The specified key does not exist.",
@@ -682,6 +693,11 @@ var errorCodes = errorCodeMap{
 		Description: "400 BadRequest",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrKeyTooLongError: {
+		Code: "KeyTooLongError",
+		Description: "Your key is too long",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 
 	// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
 	ErrUnsignedHeaders: {
@@ -885,6 +901,11 @@ var errorCodes = errorCodeMap{
 		Description: "Object name contains unsupported characters.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidObjectNamePrefixSlash: {
+		Code: "XMinioInvalidObjectName",
+		Description: "Object name contains a leading slash.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrInvalidResourceName: {
 		Code: "XMinioInvalidResourceName",
 		Description: "Resource name contains bad components such as \"..\" or \".\".",
@@ -905,6 +926,16 @@ var errorCodes = errorCodeMap{
 		Description: "The specified user does not exist.",
 		HTTPStatusCode: http.StatusNotFound,
 	},
+	ErrAdminNoSuchGroup: {
+		Code: "XMinioAdminNoSuchGroup",
+		Description: "The specified group does not exist.",
+		HTTPStatusCode: http.StatusNotFound,
+	},
+	ErrAdminGroupNotEmpty: {
+		Code: "XMinioAdminGroupNotEmpty",
+		Description: "The specified group is not empty - cannot remove it.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrAdminNoSuchPolicy: {
 		Code: "XMinioAdminNoSuchPolicy",
 		Description: "The canned policy does not exist.",
@@ -1466,6 +1497,11 @@ var errorCodes = errorCodeMap{
 		Description: "User is not allowed to be same as admin access key",
 		HTTPStatusCode: http.StatusConflict,
 	},
+	ErrPostPolicyConditionInvalidFormat: {
+		Code: "PostPolicyInvalidKeyName",
+		Description: "Invalid according to Policy: Policy Condition failed",
+		HTTPStatusCode: http.StatusForbidden,
+	},
 	// Add your error structure here.
 }

@@ -1482,6 +1518,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrAdminInvalidArgument
 	case errNoSuchUser:
 		apiErr = ErrAdminNoSuchUser
+	case errNoSuchGroup:
+		apiErr = ErrAdminNoSuchGroup
+	case errGroupNotEmpty:
+		apiErr = ErrAdminGroupNotEmpty
 	case errNoSuchPolicy:
 		apiErr = ErrAdminNoSuchPolicy
 	case errSignatureMismatch:
@@ -1492,6 +1532,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrEntityTooLarge
 	case errDataTooSmall:
 		apiErr = ErrEntityTooSmall
+	case errAuthentication:
+		apiErr = ErrAccessDenied
 	case auth.ErrInvalidAccessKeyLength:
 		apiErr = ErrAdminInvalidAccessKey
 	case auth.ErrInvalidSecretKeyLength:
@@ -1579,6 +1621,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrMethodNotAllowed
 	case ObjectNameInvalid:
 		apiErr = ErrInvalidObjectName
+	case ObjectNamePrefixAsSlash:
+		apiErr = ErrInvalidObjectNamePrefixSlash
 	case InvalidUploadID:
 		apiErr = ErrNoSuchUpload
 	case InvalidPart:
@@ -1613,6 +1657,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrUnsupportedMetadata
 	case BucketPolicyNotFound:
 		apiErr = ErrNoSuchBucketPolicy
+	case BucketLifecycleNotFound:
+		apiErr = ErrNoSuchBucketLifecycle
 	case *event.ErrInvalidEventName:
 		apiErr = ErrEventNotification
 	case *event.ErrInvalidARN:
@@ -1639,6 +1685,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrBackendDown
 	case crypto.Error:
 		apiErr = ErrObjectTampered
+	case ObjectNameTooLong:
+		apiErr = ErrKeyTooLongError
 	default:
 		var ie, iw int
 		// This work-around is to handle the issue golang/go#30648
@@ -1681,6 +1729,12 @@ func toAPIError(ctx context.Context, err error) APIError {
 		// their internal error types. This code is only
 		// useful with gateway implementations.
 		switch e := err.(type) {
+		case crypto.Error:
+			apiErr = APIError{
+				Code: "XKMSInternalError",
+				Description: e.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
 		case minio.ErrorResponse:
 			apiErr = APIError{
 				Code: e.Code,

@@ -26,6 +26,7 @@ import (
 	"time"
 
 	"github.com/minio/minio/cmd/crypto"
+	xhttp "github.com/minio/minio/cmd/http"
 )
 
 // Returns a hexadecimal representation of time at the
@@ -36,13 +37,13 @@ func mustGetRequestID(t time.Time) string {
 
 // Write http common headers
 func setCommonHeaders(w http.ResponseWriter) {
-	w.Header().Set("Server", "MinIO/"+ReleaseTag)
+	w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
 	// Set `x-amz-bucket-region` only if region is set on the server
 	// by default minio uses an empty region.
 	if region := globalServerConfig.GetRegion(); region != "" {
-		w.Header().Set("X-Amz-Bucket-Region", region)
+		w.Header().Set(xhttp.AmzBucketRegion, region)
 	}
-	w.Header().Set("Accept-Ranges", "bytes")
+	w.Header().Set(xhttp.AcceptRanges, "bytes")
 
 	// Remove sensitive information
 	crypto.RemoveSensitiveHeaders(w.Header())
@@ -72,23 +73,23 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 
 	// Set last modified time.
 	lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
-	w.Header().Set("Last-Modified", lastModified)
+	w.Header().Set(xhttp.LastModified, lastModified)
 
 	// Set Etag if available.
 	if objInfo.ETag != "" {
-		w.Header()["ETag"] = []string{"\"" + objInfo.ETag + "\""}
+		w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
 	}
 
 	if objInfo.ContentType != "" {
-		w.Header().Set("Content-Type", objInfo.ContentType)
+		w.Header().Set(xhttp.ContentType, objInfo.ContentType)
 	}
 
 	if objInfo.ContentEncoding != "" {
-		w.Header().Set("Content-Encoding", objInfo.ContentEncoding)
+		w.Header().Set(xhttp.ContentEncoding, objInfo.ContentEncoding)
 	}
 
 	if !objInfo.Expires.IsZero() {
-		w.Header().Set("Expires", objInfo.Expires.UTC().Format(http.TimeFormat))
+		w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
 	}
 
 	// Set all other user defined metadata.
@@ -124,10 +125,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 	}
 
 	// Set content length.
-	w.Header().Set("Content-Length", strconv.FormatInt(rangeLen, 10))
+	w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
 	if rs != nil {
 		contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
-		w.Header().Set("Content-Range", contentRange)
+		w.Header().Set(xhttp.ContentRange, contentRange)
 	}
 
 	return nil
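The mechanical change in this file replaces every literal header name with an xhttp constant, centralizing the spelling in one package. A sketch of what such a constants block looks like, with the names inferred from the substitutions above (the actual cmd/http package may define more):

    // Sketch of the constants the xhttp package evidently provides,
    // matching the calls above one-to-one.
    const (
    	ServerInfo      = "Server"
    	AmzBucketRegion = "X-Amz-Bucket-Region"
    	AcceptRanges    = "Accept-Ranges"
    	LastModified    = "Last-Modified"
    	ETag            = "ETag"
    	ContentType     = "Content-Type"
    	ContentEncoding = "Content-Encoding"
    	ContentLength   = "Content-Length"
    	ContentRange    = "Content-Range"
    	Expires         = "Expires"
    )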

@@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"encoding/base64"
 	"net/url"
 	"strconv"
 )
@@ -42,6 +43,27 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
 	return
 }
 
+func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
+	errCode = ErrNone
+
+	if values.Get("max-keys") != "" {
+		var err error
+		if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
+			errCode = ErrInvalidMaxKeys
+			return
+		}
+	} else {
+		maxkeys = maxObjectList
+	}
+
+	prefix = values.Get("prefix")
+	marker = values.Get("key-marker")
+	delimiter = values.Get("delimiter")
+	encodingType = values.Get("encoding-type")
+	versionIDMarker = values.Get("version-id-marker")
+	return
+}
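A short usage sketch for the new parser, feeding it the query parameter names it reads (the example query string is made up):

    // Sketch: exercising getListBucketObjectVersionsArgs with a
    // hand-built ListObjectVersions query string.
    func exampleListVersionsArgs() {
    	values, err := url.ParseQuery("prefix=photos/&key-marker=img10&delimiter=/&max-keys=50")
    	if err != nil {
    		return
    	}
    	prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(values)
    	if errCode != ErrNone {
    		return // max-keys was present but not an integer
    	}
    	_, _, _, _, _, _ = prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker
    }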

@@ -65,11 +87,19 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
 	}
 
 	prefix = values.Get("prefix")
-	token = values.Get("continuation-token")
 	startAfter = values.Get("start-after")
 	delimiter = values.Get("delimiter")
 	fetchOwner = values.Get("fetch-owner") == "true"
 	encodingType = values.Get("encoding-type")
 
+	if token = values.Get("continuation-token"); token != "" {
+		decodedToken, err := base64.StdEncoding.DecodeString(token)
+		if err != nil {
+			errCode = ErrIncorrectContinuationToken
+			return
+		}
+		token = string(decodedToken)
+	}
 	return
 }
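The continuation token is now treated as opaque base64 on the wire, which is why the tests below switch the fixture from "token" to "dG9rZW4=". A round-trip sketch of the encoding implied by the hunk above:

    // Sketch: the server hands out base64-encoded continuation tokens
    // and decodes them on the next request, as the hunk above does.
    func tokenRoundTrip() {
    	wire := base64.StdEncoding.EncodeToString([]byte("token")) // "dG9rZW4="
    	decoded, err := base64.StdEncoding.DecodeString(wire)
    	if err != nil {
    		// a malformed token maps to ErrIncorrectContinuationToken above
    		return
    	}
    	fmt.Println(string(decoded)) // "token"
    }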

@@ -34,9 +34,9 @@ func TestListObjectsV2Resources(t *testing.T) {
 		{
 			values: url.Values{
 				"prefix": []string{"photos/"},
-				"continuation-token": []string{"token"},
+				"continuation-token": []string{"dG9rZW4="},
 				"start-after": []string{"start-after"},
-				"delimiter": []string{"/"},
+				"delimiter": []string{SlashSeparator},
 				"fetch-owner": []string{"true"},
 				"max-keys": []string{"100"},
 				"encoding-type": []string{"gzip"},
@@ -44,7 +44,7 @@ func TestListObjectsV2Resources(t *testing.T) {
 			prefix: "photos/",
 			token: "token",
 			startAfter: "start-after",
-			delimiter: "/",
+			delimiter: SlashSeparator,
 			fetchOwner: true,
 			maxKeys: 100,
 			encodingType: "gzip",
@@ -53,16 +53,16 @@ func TestListObjectsV2Resources(t *testing.T) {
 		{
 			values: url.Values{
 				"prefix": []string{"photos/"},
-				"continuation-token": []string{"token"},
+				"continuation-token": []string{"dG9rZW4="},
 				"start-after": []string{"start-after"},
-				"delimiter": []string{"/"},
+				"delimiter": []string{SlashSeparator},
 				"fetch-owner": []string{"true"},
 				"encoding-type": []string{"gzip"},
 			},
 			prefix: "photos/",
 			token: "token",
 			startAfter: "start-after",
-			delimiter: "/",
+			delimiter: SlashSeparator,
 			fetchOwner: true,
 			maxKeys: 1000,
 			encodingType: "gzip",
@@ -73,7 +73,7 @@ func TestListObjectsV2Resources(t *testing.T) {
 				"prefix": []string{"photos/"},
 				"continuation-token": []string{""},
 				"start-after": []string{"start-after"},
-				"delimiter": []string{"/"},
+				"delimiter": []string{SlashSeparator},
 				"fetch-owner": []string{"true"},
 				"encoding-type": []string{"gzip"},
 			},
@@ -130,13 +130,13 @@ func TestListObjectsV1Resources(t *testing.T) {
 			values: url.Values{
 				"prefix": []string{"photos/"},
 				"marker": []string{"test"},
-				"delimiter": []string{"/"},
+				"delimiter": []string{SlashSeparator},
 				"max-keys": []string{"100"},
 				"encoding-type": []string{"gzip"},
 			},
 			prefix: "photos/",
 			marker: "test",
-			delimiter: "/",
+			delimiter: SlashSeparator,
 			maxKeys: 100,
 			encodingType: "gzip",
 		},
@@ -144,12 +144,12 @@ func TestListObjectsV1Resources(t *testing.T) {
 			values: url.Values{
 				"prefix": []string{"photos/"},
 				"marker": []string{"test"},
-				"delimiter": []string{"/"},
+				"delimiter": []string{SlashSeparator},
 				"encoding-type": []string{"gzip"},
 			},
 			prefix: "photos/",
 			marker: "test",
-			delimiter: "/",
+			delimiter: SlashSeparator,
 			maxKeys: 1000,
 			encodingType: "gzip",
 		},

@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"context"
+	"encoding/base64"
 	"encoding/xml"
 	"net/http"
 	"net/url"
@@ -26,6 +27,7 @@ import (
 	"strings"
 	"time"
 
+	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/handlers"
 )
@@ -43,6 +45,45 @@ type LocationResponse struct {
 	Location string `xml:",chardata"`
 }
 
+// ListVersionsResponse - format for list bucket versions response.
+type ListVersionsResponse struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
+
+	Name string
+	Prefix string
+	KeyMarker string
+
+	// When response is truncated (the IsTruncated element value in the response
+	// is true), you can use the key name in this field as marker in the subsequent
+	// request to get next set of objects. Server lists objects in alphabetical
+	// order Note: This element is returned only if you have delimiter request parameter
+	// specified. If response does not include the NextMaker and it is truncated,
+	// you can use the value of the last Key in the response as the marker in the
+	// subsequent request to get the next set of object keys.
+	NextKeyMarker string `xml:"NextKeyMarker,omitempty"`
+
+	// When the number of responses exceeds the value of MaxKeys,
+	// NextVersionIdMarker specifies the first object version not
+	// returned that satisfies the search criteria. Use this value
+	// for the version-id-marker request parameter in a subsequent request.
+	NextVersionIDMarker string `xml:"NextVersionIdMarker"`
+
+	// Marks the last version of the Key returned in a truncated response.
+	VersionIDMarker string `xml:"VersionIdMarker"`
+
+	MaxKeys int
+	Delimiter string
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+
+	CommonPrefixes []CommonPrefix
+	Versions []ObjectVersion
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string `xml:"EncodingType,omitempty"`
+}
|
||||
|
||||
// ListObjectsResponse - format for list objects response.
|
||||
type ListObjectsResponse struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
|
||||
@@ -190,6 +231,14 @@ type Bucket struct {
|
||||
CreationDate string // time string of format "2006-01-02T15:04:05.000Z"
|
||||
}
|
||||
|
||||
// ObjectVersion container for object version metadata
|
||||
type ObjectVersion struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
|
||||
Object
|
||||
VersionID string `xml:"VersionId"`
|
||||
IsLatest bool
|
||||
}
|
||||
|
||||
// Object container for object metadata
|
||||
type Object struct {
|
||||
Key string
|
||||
@@ -292,14 +341,14 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
}
|
||||
u := &url.URL{
|
||||
Host: r.Host,
|
||||
Path: path.Join(slashSeparator, bucket, object),
|
||||
Path: path.Join(SlashSeparator, bucket, object),
|
||||
Scheme: proto,
|
||||
}
|
||||
// If domain is set then we need to use bucket DNS style.
|
||||
for _, domain := range domains {
|
||||
if strings.Contains(r.Host, domain) {
|
||||
u.Host = bucket + "." + r.Host
|
||||
u.Path = path.Join(slashSeparator, object)
|
||||
u.Path = path.Join(SlashSeparator, object)
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -327,6 +376,52 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
return data
|
||||
}
|
||||
|
||||
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
|
||||
var versions []ObjectVersion
|
||||
var prefixes []CommonPrefix
|
||||
var owner = Owner{}
|
||||
var data = ListVersionsResponse{}
|
||||
|
||||
owner.ID = globalMinioDefaultOwnerID
|
||||
for _, object := range resp.Objects {
|
||||
var content = ObjectVersion{}
|
||||
if object.Name == "" {
|
||||
continue
|
||||
}
|
||||
content.Key = s3EncodeName(object.Name, encodingType)
|
||||
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
|
||||
if object.ETag != "" {
|
||||
content.ETag = "\"" + object.ETag + "\""
|
||||
}
|
||||
content.Size = object.Size
|
||||
content.StorageClass = object.StorageClass
|
||||
content.Owner = owner
|
||||
content.VersionID = "null"
|
||||
content.IsLatest = true
|
||||
versions = append(versions, content)
|
||||
}
|
||||
data.Name = bucket
|
||||
data.Versions = versions
|
||||
|
||||
data.EncodingType = encodingType
|
||||
data.Prefix = s3EncodeName(prefix, encodingType)
|
||||
data.KeyMarker = s3EncodeName(marker, encodingType)
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
|
||||
data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
|
||||
data.IsTruncated = resp.IsTruncated
|
||||
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
prefixes = append(prefixes, prefixItem)
|
||||
}
|
||||
data.CommonPrefixes = prefixes
|
||||
return data
|
||||
}
|
||||
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
var contents []Object
|
||||
@@ -404,8 +499,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.Prefix = s3EncodeName(prefix, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
data.ContinuationToken = token
|
||||
data.NextContinuationToken = nextToken
|
||||
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
|
||||
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
|
||||
data.IsTruncated = isTruncated
|
||||
for _, prefix := range prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
@@ -522,9 +617,9 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier,
|
||||
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
|
||||
setCommonHeaders(w)
|
||||
if mType != mimeNone {
|
||||
w.Header().Set("Content-Type", string(mType))
|
||||
w.Header().Set(xhttp.ContentType, string(mType))
|
||||
}
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
|
||||
w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(response)))
|
||||
w.WriteHeader(statusCode)
|
||||
if response != nil {
|
||||
w.Write(response)
|
||||
@@ -563,7 +658,7 @@ func writeSuccessNoContent(w http.ResponseWriter) {
|
||||
|
||||
// writeRedirectSeeOther writes Location header with http status 303
|
||||
func writeRedirectSeeOther(w http.ResponseWriter, location string) {
|
||||
w.Header().Set("Location", location)
|
||||
w.Header().Set(xhttp.Location, location)
|
||||
writeResponse(w, http.StatusSeeOther, nil, mimeNone)
|
||||
}
|
||||
|
||||
@@ -577,12 +672,12 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
||||
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
|
||||
// Set retry-after header to indicate user-agents to retry request after 120secs.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set("Retry-After", "120")
|
||||
w.Header().Set(xhttp.RetryAfter, "120")
|
||||
case "AccessDenied":
|
||||
// The request is from browser and also if browser
|
||||
// is enabled we need to redirect.
|
||||
if browser {
|
||||
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
|
||||
if browser && globalIsBrowserEnabled {
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
@@ -590,7 +685,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
||||
|
||||
// Generate error response.
|
||||
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
|
||||
w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
|
||||
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
|
||||
}
|
||||
@@ -603,7 +698,7 @@ func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
|
||||
// useful for admin APIs.
|
||||
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
|
||||
// Generate error response.
|
||||
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
|
||||
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
}
|
||||
@@ -621,8 +716,8 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
|
||||
Resource: reqURL.Path,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
RequestID: w.Header().Get(responseRequestIDKey),
|
||||
HostID: w.Header().Get(responseDeploymentIDKey),
|
||||
RequestID: w.Header().Get(xhttp.AmzRequestID),
|
||||
HostID: globalDeploymentID,
|
||||
}
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
@@ -637,12 +732,12 @@ func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err
|
||||
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
|
||||
// Set retry-after header to indicate user-agents to retry request after 120secs.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set("Retry-After", "120")
|
||||
w.Header().Set(xhttp.RetryAfter, "120")
|
||||
case "AccessDenied":
|
||||
// The request is from browser and also if browser
|
||||
// is enabled we need to redirect.
|
||||
if browser && globalIsBrowserEnabled {
|
||||
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
@@ -655,8 +750,8 @@ func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err
|
||||
Resource: reqURL.Path,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
RequestID: w.Header().Get(responseRequestIDKey),
|
||||
HostID: w.Header().Get(responseDeploymentIDKey),
|
||||
RequestID: w.Header().Get(xhttp.AmzRequestID),
|
||||
HostID: globalDeploymentID,
|
||||
}
|
||||
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
|
||||
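A recurring pattern in this file is replacing string-literal header names with constants from the new xhttp package (cmd/http). A hedged sketch of what such a constants file looks like; the names match the diff, but the exact values here are assumptions based on the conventional header spellings:

```go
// Sketch of a header-constants package in the style of cmd/http.
// The string values are assumed, not copied from MinIO.
package xhttp

const (
	ContentType   = "Content-Type"
	ContentLength = "Content-Length"
	Location      = "Location"
	RetryAfter    = "Retry-After"
	Authorization = "Authorization"
	AmzRequestID  = "x-amz-request-id"
)
```

Centralizing the names lets the compiler catch a typo such as `xhttp.ContentLenght`, whereas a misspelled raw string would pass silently.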
@@ -20,6 +20,7 @@ import (
 	"net/http"

 	"github.com/gorilla/mux"
+	xhttp "github.com/minio/minio/cmd/http"
 )

 // objectAPIHandler implements and provides http handlers for S3 API.
@@ -47,7 +48,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
 	}

 	// API Router
-	apiRouter := router.PathPrefix("/").Subrouter()
+	apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
 	var routers []*mux.Router
 	for _, domainName := range globalDomainNames {
 		routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
@@ -58,98 +59,107 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
 	for _, bucket := range routers {
 		// Object operations
 		// HeadObject
-		bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler))
+		bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler))
 		// CopyObjectPart
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 		// PutObjectPart
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
-		// ListObjectPxarts
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		// ListObjectParts
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}")
 		// CompleteMultipartUpload
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
 		// NewMultipartUpload
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "")
+		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "")
 		// AbortMultipartUpload
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
+		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
 		// GetObjectACL - this is a dummy call.
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
 		// GetObjectTagging - this is a dummy call.
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
 		// SelectObjectContent
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
+		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
 		// GetObject
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
+		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
 		// CopyObject
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler))
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler))
 		// PutObject
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler))
+		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler))
 		// DeleteObject
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler))
+		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler))

 		/// Bucket operations
 		// GetBucketLocation
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "")
 		// GetBucketPolicy
 		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")
+		// GetBucketLifecycle
+		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")

 		// Dummy Bucket Calls
 		// GetBucketACL -- this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
 		// GetBucketCors - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
 		// GetBucketWebsiteHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
 		// GetBucketVersioningHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
 		// GetBucketAccelerateHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
 		// GetBucketRequestPaymentHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
 		// GetBucketLoggingHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
 		// GetBucketLifecycleHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
 		// GetBucketReplicationHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
 		// GetBucketTaggingHandler - this is a dummy call.
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
 		//DeleteBucketWebsiteHandler
-		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
+		bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
 		// DeleteBucketTaggingHandler
-		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")
+		bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")

 		// GetBucketNotification
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
 		// ListenBucketNotification
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}")
 		// ListMultipartUploads
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "")
 		// ListObjectsV2
-		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2")
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2")
+		// ListBucketVersions
+		bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListBucketObjectVersionsHandler)).Queries("versions", "")
 		// ListObjectsV1 (Legacy)
 		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV1Handler))
+		// PutBucketLifecycle
+		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketLifecycleHandler)).Queries("lifecycle", "")
 		// PutBucketPolicy
 		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketPolicyHandler)).Queries("policy", "")

 		// PutBucketNotification
-		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "")
+		bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "")
 		// PutBucket
-		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketHandler))
+		bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketHandler))
 		// HeadBucket
-		bucket.Methods("HEAD").HandlerFunc(httpTraceAll(api.HeadBucketHandler))
+		bucket.Methods(http.MethodHead).HandlerFunc(httpTraceAll(api.HeadBucketHandler))
 		// PostPolicy
-		bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
+		bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
 		// DeleteMultipleObjects
-		bucket.Methods("POST").HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
+		bucket.Methods(http.MethodPost).HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
 		// DeleteBucketPolicy
 		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketPolicyHandler)).Queries("policy", "")
+		// DeleteBucketLifecycle
+		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketLifecycleHandler)).Queries("lifecycle", "")
 		// DeleteBucket
-		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketHandler))
+		bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketHandler))
 	}

 	/// Root operation

 	// ListBuckets
-	apiRouter.Methods("GET").Path("/").HandlerFunc(httpTraceAll(api.ListBucketsHandler))
+	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler))

 	// If none of the routes match.
 	apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
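Throughout the router, bare method strings give way to the net/http constants, and the root prefix becomes the new SlashSeparator constant. A small runnable sketch of the resulting registration style (the handler body and port are placeholders, and SlashSeparator is redefined locally for self-containment):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// SlashSeparator mirrors the constant the diff introduces for "/".
const SlashSeparator = "/"

func main() {
	router := mux.NewRouter()
	api := router.PathPrefix(SlashSeparator).Subrouter()

	// http.MethodGet is a compile-time checked constant; a misspelled
	// string such as "GETT" would simply never match any request.
	api.Methods(http.MethodGet).Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "object:", mux.Vars(r)["object"])
		})

	http.ListenAndServe(":8080", router)
}
```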
@@ -29,6 +29,7 @@ import (
 	"strings"

 	jwtgo "github.com/dgrijalva/jwt-go"
+	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
 	"github.com/minio/minio/pkg/hash"
@@ -38,41 +39,41 @@ import (

 // Verify if request has JWT.
 func isRequestJWT(r *http.Request) bool {
-	return strings.HasPrefix(r.Header.Get("Authorization"), jwtAlgorithm)
+	return strings.HasPrefix(r.Header.Get(xhttp.Authorization), jwtAlgorithm)
 }

 // Verify if request has AWS Signature Version '4'.
 func isRequestSignatureV4(r *http.Request) bool {
-	return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm)
+	return strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm)
 }

 // Verify if request has AWS Signature Version '2'.
 func isRequestSignatureV2(r *http.Request) bool {
-	return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
-		strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm))
+	return (!strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm) &&
+		strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV2Algorithm))
 }

 // Verify if request has AWS PreSign Version '4'.
 func isRequestPresignedSignatureV4(r *http.Request) bool {
-	_, ok := r.URL.Query()["X-Amz-Credential"]
+	_, ok := r.URL.Query()[xhttp.AmzCredential]
 	return ok
 }

 // Verify request has AWS PreSign Version '2'.
 func isRequestPresignedSignatureV2(r *http.Request) bool {
-	_, ok := r.URL.Query()["AWSAccessKeyId"]
+	_, ok := r.URL.Query()[xhttp.AmzAccessKeyID]
 	return ok
 }

 // Verify if request has AWS Post policy Signature Version '4'.
 func isRequestPostPolicySignatureV4(r *http.Request) bool {
-	return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") &&
+	return strings.Contains(r.Header.Get(xhttp.ContentType), "multipart/form-data") &&
 		r.Method == http.MethodPost
 }

 // Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
 func isRequestSignStreamingV4(r *http.Request) bool {
-	return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 &&
+	return r.Header.Get(xhttp.AmzContentSha256) == streamingContentSHA256 &&
 		r.Method == http.MethodPut
 }
@@ -109,9 +110,9 @@ func getRequestAuthType(r *http.Request) authType {
 		return authTypeJWT
 	} else if isRequestPostPolicySignatureV4(r) {
 		return authTypePostPolicy
-	} else if _, ok := r.URL.Query()["Action"]; ok {
+	} else if _, ok := r.URL.Query()[xhttp.Action]; ok {
 		return authTypeSTS
-	} else if _, ok := r.Header["Authorization"]; !ok {
+	} else if _, ok := r.Header[xhttp.Authorization]; !ok {
 		return authTypeAnonymous
 	}
 	return authTypeUnknown
@@ -121,7 +122,7 @@ func getRequestAuthType(r *http.Request) authType {
 // It does not accept presigned or JWT or anonymous requests.
 func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
 	s3Err := ErrAccessDenied
-	if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
+	if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
 		getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
 		// We only support admin credentials to access admin APIs.

@@ -148,11 +149,11 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region stri

 // Fetch the security token set by the client.
 func getSessionToken(r *http.Request) (token string) {
-	token = r.Header.Get("X-Amz-Security-Token")
+	token = r.Header.Get(xhttp.AmzSecurityToken)
 	if token != "" {
 		return token
 	}
-	return r.URL.Query().Get("X-Amz-Security-Token")
+	return r.URL.Query().Get(xhttp.AmzSecurityToken)
 }

 // Fetch claims in the security token returned by the client, doesn't return
@@ -202,6 +203,12 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
 	}

 	if globalPolicyOPA == nil {
+		// If OPA is not set and if ldap claim key is set,
+		// allow the claim.
+		if _, ok := claims[ldapUser]; ok {
+			return claims, nil
+		}
+
 		// If OPA is not set, session token should
 		// have a policy and its mandatory, reject
 		// requests without policy claim.
@@ -258,14 +265,24 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
 // for authenticated requests validates IAM policies.
 // returns APIErrorCode if any to be replied to the client.
 func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
+	_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
+	return s3Err
+}
+
+// Check request auth type verifies the incoming http request
+// - validates the request signature
+// - validates the policy action if anonymous tests bucket policies if any,
+// for authenticated requests validates IAM policies.
+// returns APIErrorCode if any to be replied to the client.
+// Additionally returns the accessKey used in the request, and if this request is by an admin.
+func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
 	var cred auth.Credentials
-	var owner bool
 	switch getRequestAuthType(r) {
 	case authTypeUnknown, authTypeStreamingSigned:
-		return ErrAccessDenied
+		return accessKey, owner, ErrAccessDenied
 	case authTypePresignedV2, authTypeSignedV2:
 		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
-			return s3Err
+			return accessKey, owner, s3Err
 		}
 		cred, owner, s3Err = getReqAccessKeyV2(r)
 	case authTypeSigned, authTypePresigned:
@@ -275,17 +292,18 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
 			region = ""
 		}
 		if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
-			return s3Err
+			return accessKey, owner, s3Err
 		}
 		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
 	}
 	if s3Err != ErrNone {
-		return s3Err
+		return accessKey, owner, s3Err
 	}

-	claims, s3Err := checkClaimsFromToken(r, cred)
+	var claims map[string]interface{}
+	claims, s3Err = checkClaimsFromToken(r, cred)
 	if s3Err != ErrNone {
-		return s3Err
+		return accessKey, owner, s3Err
 	}

 	// LocationConstraint is valid only for CreateBucketAction.
@@ -295,7 +313,7 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
 	payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
 	if err != nil {
 		logger.LogIf(ctx, err)
-		return ErrMalformedXML
+		return accessKey, owner, ErrMalformedXML
 	}

 	// Populate payload to extract location constraint.
@@ -304,7 +322,7 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
 		var s3Error APIErrorCode
 		locationConstraint, s3Error = parseLocationConstraint(r)
 		if s3Error != ErrNone {
-			return s3Error
+			return accessKey, owner, s3Error
 		}

 		// Populate payload again to handle it in HTTP handler.
@@ -320,9 +338,10 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
 			IsOwner:    false,
 			ObjectName: objectName,
 		}) {
-			return ErrNone
+			// Request is allowed return the appropriate access key.
+			return cred.AccessKey, owner, ErrNone
 		}
-		return ErrAccessDenied
+		return accessKey, owner, ErrAccessDenied
 	}

@@ -334,9 +353,10 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
 		IsOwner: owner,
 		Claims:  claims,
 	}) {
-		return ErrNone
+		// Request is allowed return the appropriate access key.
+		return cred.AccessKey, owner, ErrNone
 	}
-	return ErrAccessDenied
+	return accessKey, owner, ErrAccessDenied
 }

 // Verify if request has valid AWS Signature Version '2'.
@@ -370,8 +390,8 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
 		contentMD5, contentSHA256 []byte
 	)
 	// Extract 'Content-Md5' if present.
-	if _, ok := r.Header["Content-Md5"]; ok {
-		contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get("Content-Md5"))
+	if _, ok := r.Header[xhttp.ContentMD5]; ok {
+		contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get(xhttp.ContentMD5))
 		if err != nil || len(contentMD5) == 0 {
 			return ErrInvalidDigest
 		}
@@ -380,14 +400,14 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
 	// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
 	// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
 	if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
-		if sha256Sum, ok := r.URL.Query()["X-Amz-Content-Sha256"]; ok && len(sha256Sum) > 0 {
+		if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
 			contentSHA256, err = hex.DecodeString(sha256Sum[0])
 			if err != nil {
 				return ErrContentSHA256Mismatch
 			}
 		}
-	} else if _, ok := r.Header["X-Amz-Content-Sha256"]; !skipSHA256 && ok {
-		contentSHA256, err = hex.DecodeString(r.Header.Get("X-Amz-Content-Sha256"))
+	} else if _, ok := r.Header[xhttp.AmzContentSha256]; !skipSHA256 && ok {
+		contentSHA256, err = hex.DecodeString(r.Header.Get(xhttp.AmzContentSha256))
 		if err != nil || len(contentSHA256) == 0 {
 			return ErrContentSHA256Mismatch
 		}
@@ -395,7 +415,8 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty

 	// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
 	// The verification happens implicit during reading.
-	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
+	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
+		hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return toAPIErrorCode(ctx, err)
 	}
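The refactor above threads two extra return values (accessKey, owner) through every exit path of the auth check, while keeping checkRequestAuthType as a thin wrapper so existing call sites compile unchanged. A compact, runnable sketch of that pattern with stand-in types (none of these names are MinIO's):

```go
package main

import "fmt"

// ErrCode stands in for the APIErrorCode type in the diff.
type ErrCode int

const (
	ErrNone ErrCode = iota
	ErrAccessDenied
)

// checkAuthToAccessKey mirrors checkRequestAuthTypeToAccessKey's shape:
// it returns the access key and owner flag alongside the error code.
func checkAuthToAccessKey(authorized bool) (accessKey string, owner bool, err ErrCode) {
	if !authorized {
		// Named returns keep the zero values ("", false) in error paths,
		// which is exactly what "return accessKey, owner, s3Err" does above.
		return accessKey, owner, ErrAccessDenied
	}
	return "AKIAEXAMPLE", true, ErrNone
}

// checkAuth preserves the old single-value signature by delegating.
func checkAuth(authorized bool) ErrCode {
	_, _, err := checkAuthToAccessKey(authorized)
	return err
}

func main() {
	fmt.Println(checkAuth(true), checkAuth(false)) // 0 1
}
```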
@@ -44,7 +44,7 @@ func TestGetRequestAuthType(t *testing.T) {
 			URL: &url.URL{
 				Host:   "127.0.0.1:9000",
 				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
 			},
 			Header: http.Header{
 				"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
@@ -62,7 +62,7 @@ func TestGetRequestAuthType(t *testing.T) {
 			URL: &url.URL{
 				Host:   "127.0.0.1:9000",
 				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
 			},
 			Header: http.Header{
 				"Authorization": []string{"Bearer 12313123"},
@@ -77,7 +77,7 @@ func TestGetRequestAuthType(t *testing.T) {
 			URL: &url.URL{
 				Host:   "127.0.0.1:9000",
 				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
 			},
 			Header: http.Header{
 				"Authorization": []string{""},
@@ -92,7 +92,7 @@ func TestGetRequestAuthType(t *testing.T) {
 			URL: &url.URL{
 				Host:   "127.0.0.1:9000",
 				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
 				RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
 			},
 		},
@@ -105,7 +105,7 @@ func TestGetRequestAuthType(t *testing.T) {
 			URL: &url.URL{
 				Host:   "127.0.0.1:9000",
 				Scheme: httpScheme,
-				Path:   "/",
+				Path:   SlashSeparator,
 			},
 			Header: http.Header{
 				"Content-Type": []string{"multipart/form-data"},
@@ -62,11 +62,11 @@ func (h *healRoutine) run() {
 			break
 		}
 		if globalHTTPServer != nil {
-			// Wait at max 1 minute for an inprogress request
-			// before proceeding to heal
-			waitCount := 60
+			// Wait at max 10 minute for an inprogress request before proceeding to heal
+			waitCount := 600
 			// Any requests in progress, delay the heal.
-			for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
+			for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
+				waitCount > 0 {
 				waitCount--
 				time.Sleep(1 * time.Second)
 			}
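This hunk changes both the budget (60 s to 600 s) and the threshold: instead of a fixed "more than 2 requests", healing now defers while in-flight requests number at least one per drive across all erasure sets. A stand-alone sketch of the back-off loop, with stand-in names for the globals:

```go
package main

import (
	"fmt"
	"time"
)

// waitForQuiescence is a sketch of the new heal delay: wait up to 10
// minutes while the server still has at least setCount*driveCount
// requests in flight. requestCount stands in for GetRequestCount.
func waitForQuiescence(requestCount func() int32, setCount, driveCount int) {
	waitCount := 600 // seconds, i.e. 10 minutes
	for requestCount() >= int32(setCount*driveCount) && waitCount > 0 {
		waitCount--
		time.Sleep(1 * time.Second)
	}
}

func main() {
	inflight := int32(0) // pretend the server is idle
	waitForQuiescence(func() int32 { return inflight }, 4, 16)
	fmt.Println("proceeding to heal")
}
```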
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/hex"
+	"fmt"
 	"hash"
 	"io"

@@ -131,7 +132,8 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 	b.h.Write(buf)

 	if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
-		err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
+		err = fmt.Errorf("hashes do not match expected %s, got %s",
+			hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))
 		logger.LogIf(context.Background(), err)
 		return 0, err
 	}
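The hunk above trades the typed hashMismatchError for a formatted error with the same message. One consequence worth noting: callers can no longer branch on the error's type. A small sketch of the difference (the type definition here mirrors what was removed, as an assumption about its shape):

```go
package main

import (
	"errors"
	"fmt"
)

// hashMismatchError approximates the removed type.
type hashMismatchError struct{ expected, computed string }

func (e hashMismatchError) Error() string {
	return fmt.Sprintf("hashes do not match expected %s, got %s", e.expected, e.computed)
}

func main() {
	typed := error(hashMismatchError{"abcd", "ef01"})
	flat := fmt.Errorf("hashes do not match expected %s, got %s", "abcd", "ef01")

	var hme hashMismatchError
	fmt.Println(errors.As(typed, &hme)) // true: callers can branch on the type
	fmt.Println(errors.As(flat, &hme))  // false: only the message survives
}
```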
@@ -156,32 +156,10 @@ func bitrotWriterSum(w io.Writer) []byte {
 	return nil
 }

-// Verify if a file has bitrot error.
-func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
+// Returns the size of the file with bitrot protection
+func bitrotShardFileSize(size int64, shardSize int64, algo BitrotAlgorithm) int64 {
 	if algo != HighwayHash256S {
-		buf := []byte{}
-		// For whole-file bitrot we don't need to read the entire file as the bitrot verify happens on the server side even if we read 0-bytes.
-		_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
-		return err
+		return size
 	}
-	buf := make([]byte, shardSize)
-	r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
-	defer closeBitrotReaders([]io.ReaderAt{r})
-	var offset int64
-	for {
-		if offset == tillOffset {
-			break
-		}
-		var n int
-		tmpBuf := buf
-		if int64(len(tmpBuf)) > (tillOffset - offset) {
-			tmpBuf = tmpBuf[:(tillOffset - offset)]
-		}
-		n, err = r.ReadAt(tmpBuf, offset)
-		if err != nil {
-			return err
-		}
-		offset += int64(n)
-	}
-	return nil
+	return ceilFrac(size, shardSize)*int64(algo.New().Size()) + size
 }
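The replacement helper computes how large a bitrot-protected shard file is on disk: for the streaming algorithm (HighwayHash256S) every shardSize-byte chunk is preceded by one fixed-size digest, so the total is ceil(size/shardSize) digests plus the payload; for whole-file algorithms the size is unchanged. A worked example, assuming a 32-byte digest:

```go
package main

import "fmt"

// ceilFrac mirrors the helper the diff relies on: ceiling of a/b.
func ceilFrac(a, b int64) int64 {
	if b == 0 {
		return 0
	}
	return (a + b - 1) / b
}

func main() {
	const (
		size      = int64(1<<20 + 1) // 1 MiB + 1 byte of data
		shardSize = int64(1 << 20)   // hash every 1 MiB shard
		digestLen = int64(32)        // HighwayHash-256 sum is 32 bytes
	)
	// Two shards -> two 32-byte digests interleaved with the data.
	total := ceilFrac(size, shardSize)*digestLen + size
	fmt.Println(total) // 1048641
}
```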
@@ -46,16 +46,86 @@ func validateListObjectsArgs(prefix, marker, delimiter, encodingType string, max
 		}
 	}

 	/// MinIO special conditions for ListObjects.

 	// Verify if delimiter is anything other than '/', which we do not support.
-	if delimiter != "" && delimiter != "/" {
+	if delimiter != "" && delimiter != SlashSeparator {
 		return ErrNotImplemented
 	}
 	// Success.
 	return ErrNone
 }

+// ListBucketObjectVersions - GET Bucket Object versions
+// You can use the versions subresource to list metadata about all
+// of the versions of objects in a bucket.
+func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ListBucketObjectVersions")
+
+	defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r))
+
+	vars := mux.Vars(r)
+	bucket := vars["bucket"]
+
+	objectAPI := api.ObjectAPI()
+	if objectAPI == nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	urlValues := r.URL.Query()
+
+	// Extract all the listBucketVersions query params to their native values.
+	// versionIDMarker is ignored here.
+	prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues)
+	if errCode != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	// Validate the query params before beginning to serve the request.
+	if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxkeys); s3Error != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	listObjects := objectAPI.ListObjects
+
+	// Inititate a list objects operation based on the input params.
+	// On success would return back ListObjectsInfo object to be
+	// marshaled into S3 compatible XML header.
+	listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys)
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	for i := range listObjectsInfo.Objects {
+		var actualSize int64
+		if listObjectsInfo.Objects[i].IsCompressed() {
+			// Read the decompressed size from the meta.json.
+			actualSize = listObjectsInfo.Objects[i].GetActualSize()
+			if actualSize < 0 {
+				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize),
+					r.URL, guessIsBrowserReq(r))
+				return
+			}
+			// Set the info.Size to the actualSize.
+			listObjectsInfo.Objects[i].Size = actualSize
+		} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
+			listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
+			listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
+			if err != nil {
+				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+				return
+			}
+		}
+	}
+
+	response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo)
+
+	// Write success response.
+	writeSuccessResponseXML(w, encodeResponse(response))
+}
+
 // ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
 // --------------------------
 // This implementation of the GET operation returns some or all (up to 1000)
@@ -100,9 +170,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
 	}

 	listObjectsV2 := objectAPI.ListObjectsV2
-	if api.CacheAPI() != nil {
-		listObjectsV2 = api.CacheAPI().ListObjectsV2
-	}

 	// Inititate a list objects operation based on the input params.
 	// On success would return back ListObjectsInfo object to be
 	// marshaled into S3 compatible XML header.
@@ -179,9 +247,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 	}

 	listObjects := objectAPI.ListObjects
-	if api.CacheAPI() != nil {
-		listObjects = api.CacheAPI().ListObjects
-	}

 	// Inititate a list objects operation based on the input params.
 	// On success would return back ListObjectsInfo object to be
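The new versions handler reuses the flat ListObjects walk and reports every object as its own latest "null" version, since the backend keeps a single version per key. A toy, self-contained sketch of that folding with simplified stand-in types (not the real ones from the diff):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type ObjectVersion struct {
	Key       string
	VersionID string `xml:"VersionId"`
	IsLatest  bool
}

type ListVersionsResponse struct {
	XMLName  xml.Name        `xml:"ListVersionsResult"`
	Name     string
	Versions []ObjectVersion `xml:"Version"`
}

func main() {
	resp := ListVersionsResponse{Name: "photos"}
	// Each listed object becomes exactly one Version entry.
	for _, key := range []string{"a.jpg", "b.jpg"} {
		resp.Versions = append(resp.Versions, ObjectVersion{
			Key: key, VersionID: "null", IsLatest: true,
		})
	}
	out, _ := xml.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}
```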
@@ -22,6 +22,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"io"
+	"net"
 	"net/http"
 	"net/url"
 	"path"
@@ -32,11 +33,13 @@ import (

 	"github.com/minio/minio-go/v6/pkg/set"
 	"github.com/minio/minio/cmd/crypto"
+	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/dns"
 	"github.com/minio/minio/pkg/event"
 	"github.com/minio/minio/pkg/handlers"
 	"github.com/minio/minio/pkg/hash"
+	iampolicy "github.com/minio/minio/pkg/iam/policy"
 	"github.com/minio/minio/pkg/policy"
 	"github.com/minio/minio/pkg/sync/errgroup"
 )
@@ -49,15 +52,27 @@ import (
 // -- If yes, check if the IP of entry matches local IP. This means entry is for this instance.
 // -- If IP of the entry doesn't match, this means entry is for another instance. Log an error to console.
 func initFederatorBackend(objLayer ObjectLayer) {
+	// Get buckets in the backend
 	b, err := objLayer.ListBuckets(context.Background())
 	if err != nil {
 		logger.LogIf(context.Background(), err)
 		return
 	}

+	// Get buckets in the DNS
+	dnsBuckets, err := globalDNSConfig.List()
+	if err != nil && err != dns.ErrNoEntriesFound {
+		logger.LogIf(context.Background(), err)
+		return
+	}
+
+	bucketSet := set.NewStringSet()
+
+	// Add buckets that are not registered with the DNS
 	g := errgroup.WithNErrs(len(b))
 	for index := range b {
 		index := index
+		bucketSet.Add(b[index].Name)
 		g.Go(func() error {
 			r, gerr := globalDNSConfig.Get(b[index].Name)
 			if gerr != nil {
@@ -77,7 +92,38 @@ func initFederatorBackend(objLayer ObjectLayer) {
 	for _, err := range g.Wait() {
 		if err != nil {
 			logger.LogIf(context.Background(), err)
 			return
 		}
 	}
+
+	g = errgroup.WithNErrs(len(dnsBuckets))
+	// Remove buckets that are in DNS for this server, but aren't local
+	for index := range dnsBuckets {
+		index := index
+
+		g.Go(func() error {
+			// This is a local bucket that exists, so we can continue
+			if bucketSet.Contains(dnsBuckets[index].Key) {
+				return nil
+			}
+
+			// This is not for our server, so we can continue
+			hostPort := net.JoinHostPort(dnsBuckets[index].Host, fmt.Sprintf("%d", dnsBuckets[index].Port))
+			if globalDomainIPs.Intersection(set.CreateStringSet(hostPort)).IsEmpty() {
+				return nil
+			}
+
+			// We go to here, so we know the bucket no longer exists, but is registered in DNS to this server
+			if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
+				return fmt.Errorf("Failed to remove DNS entry for %s due to %v", dnsBuckets[index].Key, err)
+			}
+
+			return nil
+		}, index)
+	}
+
+	for _, err := range g.Wait() {
+		if err != nil {
+			logger.LogIf(context.Background(), err)
+		}
+	}
 }
@@ -105,9 +151,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 	}

 	getBucketInfo := objectAPI.GetBucketInfo
-	if api.CacheAPI() != nil {
-		getBucketInfo = api.CacheAPI().GetBucketInfo
-	}

 	if _, err := getBucketInfo(ctx, bucket); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -202,11 +246,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
 	}

 	listBuckets := objectAPI.ListBuckets
-	if api.CacheAPI() != nil {
-		listBuckets = api.CacheAPI().ListBuckets
-	}

-	if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
+	accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
+	if s3Error != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
 		return
 	}
@@ -240,8 +282,32 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
 		}
 	}

+	// Set prefix value for "s3:prefix" policy conditionals.
+	r.Header.Set("prefix", "")
+
+	// Set delimiter value for "s3:delimiter" policy conditionals.
+	r.Header.Set("delimiter", SlashSeparator)
+
+	// err will be nil here as we already called this function
+	// earlier in this request.
+	claims, _ := getClaimsFromToken(r)
+	var newBucketsInfo []BucketInfo
+	for _, bucketInfo := range bucketsInfo {
+		if globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     accessKey,
+			Action:          iampolicy.ListBucketAction,
+			BucketName:      bucketInfo.Name,
+			ConditionValues: getConditionValues(r, "", accessKey),
+			IsOwner:         owner,
+			ObjectName:      "",
+			Claims:          claims,
+		}) {
+			newBucketsInfo = append(newBucketsInfo, bucketInfo)
+		}
+	}
+
 	// Generate response.
-	response := generateListBucketsResponse(bucketsInfo)
+	response := generateListBucketsResponse(newBucketsInfo)
 	encodedSuccessResponse := encodeResponse(response)

 	// Write response.
@@ -445,7 +511,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	}

 	// Make sure to add Location information here only for bucket
-	w.Header().Set("Location", getObjectLocation(r, globalDomainNames, bucket, ""))
+	w.Header().Set(xhttp.Location,
+		getObjectLocation(r, globalDomainNames, bucket, ""))

 	writeSuccessResponseHeadersOnly(w)
 	return
@@ -466,7 +533,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	}

 	// Make sure to add Location information here only for bucket
-	w.Header().Set("Location", path.Clean(r.URL.Path)) // Clean any trailing slashes.
+	w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.

 	writeSuccessResponseHeadersOnly(w)
 }
@@ -490,14 +557,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
 		return
 	}

-	if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
+	if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
 		return
 	}

 	bucket := mux.Vars(r)["bucket"]

+	// To detect if the client has disconnected.
+	r.Body = &detectDisconnect{r.Body, r.Context().Done()}
+
 	// Require Content-Length to be set in the request
 	size := r.ContentLength
 	if size < 0 {
@@ -532,7 +601,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

-	// Remove all tmp files creating during multipart upload
+	// Remove all tmp files created during multipart upload
 	defer form.RemoveAll()

 	// Extract all form fields
@@ -587,9 +656,10 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

 	// Handle policy if it is set.
 	if len(policyBytes) > 0 {
+
 		postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
 		if err != nil {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
 			return
 		}
@@ -645,7 +715,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 	if objectAPI.IsEncryptionSupported() {
-		if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
+		if crypto.IsRequested(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE requests
+			if crypto.SSECopy.IsRequested(r.Header) {
+				writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
+				return
+			}
 			var reader io.Reader
 			var key []byte
 			if crypto.SSEC.IsRequested(formValues) {
@@ -678,8 +752,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	}

 	location := getObjectLocation(r, globalDomainNames, bucket, object)
-	w.Header()["ETag"] = []string{`"` + objInfo.ETag + `"`}
-	w.Header().Set("Location", location)
+	w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
+	w.Header().Set(xhttp.Location, location)

 	// Notify object created event.
 	defer sendEvent(eventArgs{
@@ -742,9 +816,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 	}

 	getBucketInfo := objectAPI.GetBucketInfo
-	if api.CacheAPI() != nil {
-		getBucketInfo = api.CacheAPI().GetBucketInfo
-	}

 	if _, err := getBucketInfo(ctx, bucket); err != nil {
 		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
 		return
@@ -774,9 +846,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 	}

 	deleteBucket := objectAPI.DeleteBucket
-	if api.CacheAPI() != nil {
-		deleteBucket = api.CacheAPI().DeleteBucket
-	}

 	// Attempt to delete bucket.
 	if err := deleteBucket(ctx, bucket); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -795,6 +865,8 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 	globalNotificationSys.RemoveNotification(bucket)
 	globalPolicySys.Remove(bucket)
 	globalNotificationSys.DeleteBucket(ctx, bucket)
+	globalLifecycleSys.Remove(bucket)
+	globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)

 	// Write success response.
 	writeSuccessNoContent(w)

@@ -69,7 +69,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 			expectedRespStatus: http.StatusForbidden,
 			locationResponse:   []byte(""),
 			errorResponse: APIErrorResponse{
-				Resource: "/" + bucketName + "/",
+				Resource: SlashSeparator + bucketName + SlashSeparator,
 				Code:     "InvalidAccessKeyId",
 				Message:  "The access key ID you provided does not exist in our records.",
 			},
@@ -394,7 +394,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 			prefix:         "",
 			keyMarker:      "",
 			uploadIDMarker: "",
-			delimiter:      "/",
+			delimiter:      SlashSeparator,
 			maxUploads:     "100",
 			accessKey:      credentials.AccessKey,
 			secretKey:      credentials.SecretKey,
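The ListBuckets change above is behavioral: instead of failing the whole call when the caller lacks a blanket permission, each bucket is now filtered individually through IAM with the caller's access key, owner flag, and claims. A toy sketch of that filtering shape, with a stand-in predicate instead of globalIAMSys.IsAllowed:

```go
package main

import (
	"fmt"
	"strings"
)

type BucketInfo struct{ Name string }

// allowed is a toy stand-in for an IAM policy evaluation.
func allowed(accessKey, bucket string) bool {
	return strings.HasPrefix(bucket, accessKey+"-")
}

// filterBuckets keeps only the buckets the caller may list,
// mirroring the newBucketsInfo loop in the handler above.
func filterBuckets(accessKey string, buckets []BucketInfo) []BucketInfo {
	var visible []BucketInfo
	for _, b := range buckets {
		if allowed(accessKey, b.Name) {
			visible = append(visible, b)
		}
	}
	return visible
}

func main() {
	all := []BucketInfo{{"alice-photos"}, {"bob-logs"}, {"alice-docs"}}
	fmt.Println(filterBuckets("alice", all)) // [{alice-photos} {alice-docs}]
}
```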
165
cmd/bucket-lifecycle-handler.go
Normal file
165
cmd/bucket-lifecycle-handler.go
Normal file
@@ -0,0 +1,165 @@
|
||||
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/xml"
	"io"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/lifecycle"
	"github.com/minio/minio/pkg/policy"
)

const (
	// Lifecycle configuration file.
	bucketLifecycleConfig = "lifecycle.xml"
)

// PutBucketLifecycleHandler - This HTTP handler stores the given bucket lifecycle configuration as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketLifecycle")

	defer logger.AuditLog(w, r, "PutBucketLifecycle", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// PutBucketLifecycle always needs a Content-Md5
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = objAPI.SetBucketLifecycle(ctx, bucket, bucketLifecycle); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalLifecycleSys.Set(bucket, *bucketLifecycle)
	globalNotificationSys.SetBucketLifecycle(ctx, bucket, bucketLifecycle)

	// Success.
	writeSuccessNoContent(w)
}

// GetBucketLifecycleHandler - This HTTP handler returns the bucket lifecycle configuration.
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketLifecycle")

	defer logger.AuditLog(w, r, "GetBucketLifecycle", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read bucket lifecycle configuration.
	bucketLifecycle, err := objAPI.GetBucketLifecycle(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	lifecycleData, err := xml.Marshal(bucketLifecycle)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Write lifecycle configuration to client.
	writeSuccessResponseXML(w, lifecycleData)
}

// DeleteBucketLifecycleHandler - This HTTP handler removes the bucket lifecycle configuration.
func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucketLifecycle")

	defer logger.AuditLog(w, r, "DeleteBucketLifecycle", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err := objAPI.DeleteBucketLifecycle(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalLifecycleSys.Remove(bucket)
	globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)

	// Success.
	writeSuccessNoContent(w)
}
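As a quick sanity check of the new handlers, here is a minimal client-side sketch (not part of this changeset; the endpoint, bucket name, and the unsigned request are illustrative assumptions). It satisfies the two hard requirements visible above: a Content-Md5 header and a lifecycle XML body the parser accepts.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	lifecycleXML := []byte(`<LifecycleConfiguration>
  <Rule>
    <ID>expire-logs</ID>
    <Prefix>logs/</Prefix>
    <Status>Enabled</Status>
    <Expiration><Days>30</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`)

	// PutBucketLifecycleHandler rejects requests without Content-Md5
	// (ErrMissingContentMD5), so compute and attach the digest.
	sum := md5.Sum(lifecycleXML)

	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:9000/mybucket/?lifecycle", bytes.NewReader(lifecycleXML))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))

	// A real client must also sign the request (AWS Signature V4); omitted here.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 204 No Content on success (writeSuccessNoContent)
}
```

GetBucketLifecycle is the symmetric GET on the same `?lifecycle` sub-resource and returns the stored XML; DeleteBucketLifecycle removes it.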
@@ -17,12 +17,15 @@
package cmd

import (
+	"bytes"
	"encoding/xml"
	"errors"
	"io"
	"net/http"
+	"path"

	"github.com/gorilla/mux"
+	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"

@@ -48,6 +51,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
	vars := mux.Vars(r)
	bucketName := vars["bucket"]
+	var config *event.Config

	objAPI := api.ObjectAPI()
	if objAPI == nil {

@@ -71,24 +75,31 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
		return
	}

-	// Attempt to successfully load notification config.
-	nConfig, err := readNotificationConfig(ctx, objAPI, bucketName)
+	// Construct path to notification.xml for the given bucket.
+	configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
+
+	configData, err := readConfig(ctx, objAPI, configFile)
	if err != nil {
		// Ignore errNoSuchNotifications to comply with AWS S3.
-		if err != errNoSuchNotifications {
+		if err != errConfigNotFound {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
-
-		nConfig = &event.Config{}
+		config = &event.Config{}
+	} else {
+		if err = xml.NewDecoder(bytes.NewReader(configData)).Decode(&config); err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
	}

+	config.SetRegion(globalServerConfig.GetRegion())
+
	// If xml namespace is empty, set a default value before returning.
-	if nConfig.XMLNS == "" {
-		nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
+	if config.XMLNS == "" {
+		config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
	}

-	notificationBytes, err := xml.Marshal(nConfig)
+	notificationBytes, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return

@@ -142,9 +153,10 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
		if event.IsEventError(err) {
			apiErr = toAPIError(ctx, err)
		}

-		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
-		return
+		if _, ok := err.(*event.ErrARNNotFound); !ok {
+			writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
+			return
+		}
	}

	if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {

@@ -246,6 +258,8 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
		return
	}

+	w.Header().Set(xhttp.ContentType, "text/event-stream")
+
	target, err := target.NewHTTPClientTarget(*host, w)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))

@@ -87,7 +87,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
	// Load all custom CA files.
	for _, fi := range fis {
		// Skip all directories.
-		if hasSuffix(fi, slashSeparator) {
+		if hasSuffix(fi, SlashSeparator) {
			continue
		}
		caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
@@ -31,49 +31,72 @@ import (
	"github.com/minio/cli"
	"github.com/minio/minio-go/v6/pkg/set"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/target/console"
	"github.com/minio/minio/cmd/logger/target/http"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"
	xnet "github.com/minio/minio/pkg/net"
)

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
	if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
		logger.Fatal(errInvalidArgument,
			"Encryption support is requested but '%s' does not support encryption", name)
	}

+	if strings.HasPrefix(name, "gateway") {
+		if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
+			uiErr := uiErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")
+			logger.Fatal(uiErr, "Unable to start gateway with SSE")
+		}
+	}
+
	if globalIsCompressionEnabled && !objAPI.IsCompressionSupported() {
		logger.Fatal(errInvalidArgument,
			"Compression support is requested but '%s' does not support compression", name)
	}
}

// Check for updates and print a notification message
func checkUpdate(mode string) {
	// It's OK to ignore any errors during doUpdate() here.
	if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
		if updateMsg == "" {
			return
		}
		if globalInplaceUpdateDisabled {
-			logger.StartupMessage(updateMsg)
+			logStartupMessage(updateMsg)
		} else {
-			logger.StartupMessage(prepareUpdateMessage("Run `minio update`", latestReleaseTime.Sub(currentReleaseTime)))
+			logStartupMessage(prepareUpdateMessage("Run `mc admin update`", latestReleaseTime.Sub(currentReleaseTime)))
		}
	}
}

// Load logger targets based on user's configuration
func loadLoggers() {
+	loggerUserAgent := getUserAgent(getMinioMode())
+
	auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
	if ok {
		// Enable audit HTTP logging through ENV.
-		logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
+		logger.AddAuditTarget(http.New(auditEndpoint, loggerUserAgent, NewCustomHTTPTransport()))
	}

	loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
	if ok {
		// Enable HTTP logging through ENV.
-		logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
+		logger.AddTarget(http.New(loggerEndpoint, loggerUserAgent, NewCustomHTTPTransport()))
	} else {
		for _, l := range globalServerConfig.Logger.HTTP {
			if l.Enabled {
				// Enable http logging
-				logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
+				logger.AddTarget(http.New(l.Endpoint, loggerUserAgent, NewCustomHTTPTransport()))
			}
		}
	}

	if globalServerConfig.Logger.Console.Enabled {
		// Enable console logging
-		logger.AddTarget(console.New())
+		logger.AddTarget(globalConsoleSys.Console())
	}
}

@@ -335,6 +358,14 @@ func handleCommonEnvVars() {
			globalCacheMaxUse = maxUse
		}
	}

+	var err error
+	if cacheEncKey := os.Getenv("MINIO_CACHE_ENCRYPTION_MASTER_KEY"); cacheEncKey != "" {
+		globalCacheKMSKeyID, globalCacheKMS, err = parseKMSMasterKey(cacheEncKey)
+		if err != nil {
+			logger.Fatal(uiErrInvalidCacheEncryptionKey(err), "Invalid cache encryption master key")
+		}
+	}
	// In place update is true by default if the MINIO_UPDATE is not set
	// or is not set to 'off', if MINIO_UPDATE is set to 'off' then
	// in-place update is off.

@@ -407,3 +438,10 @@ func handleCommonEnvVars() {
		}
	}
}

+func logStartupMessage(msg string, data ...interface{}) {
+	if globalConsoleSys != nil {
+		globalConsoleSys.Send(msg)
+	}
+	logger.StartupMessage(msg, data...)
+}
@@ -20,9 +20,7 @@ import (
	"bytes"
	"context"
	"errors"
-	"fmt"

-	etcd "github.com/coreos/etcd/clientv3"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
)

@@ -51,38 +49,10 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
	return buffer.Bytes(), nil
}

-func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
-	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
-	defer cancel()
-
-	_, err := client.Delete(timeoutCtx, configFile)
-	if err != nil {
-		if err == context.DeadlineExceeded {
-			return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s",
-				client.Endpoints())
-		}
-		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s",
-			err, client.Endpoints())
-	}
-	return nil
-}
-
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}

-func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
-	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
-	defer cancel()
-	_, err := client.Put(timeoutCtx, configFile, string(data))
-	if err == context.DeadlineExceeded {
-		return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
-	} else if err != nil {
-		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
-	}
-	return nil
-}
-
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
	if err != nil {

@@ -93,29 +63,6 @@ func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data
	return err
}

-func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
-	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
-	defer cancel()
-	resp, err := client.Get(timeoutCtx, configFile)
-	if err != nil {
-		if err == context.DeadlineExceeded {
-			return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s",
-				client.Endpoints())
-		}
-		return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s",
-			err, client.Endpoints())
-	}
-	if resp.Count == 0 {
-		return nil, errConfigNotFound
-	}
-	for _, ev := range resp.Kvs {
-		if string(ev.Key) == configFile {
-			return ev.Value, nil
-		}
-	}
-	return nil, errConfigNotFound
-}
-
func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
		// Treat object not found as config not found.

@@ -1,5 +1,5 @@
/*
- * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -30,8 +30,8 @@ import (
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
-	"github.com/minio/minio/pkg/iam/policy"
-	"github.com/minio/minio/pkg/iam/validator"
+	"github.com/minio/minio/pkg/iam/openid"
+	iampolicy "github.com/minio/minio/pkg/iam/policy"
	xnet "github.com/minio/minio/pkg/net"
)

@@ -281,17 +281,33 @@ func (s *serverConfig) loadFromEnvs() {
	}

	if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
-		if u, err := xnet.ParseURL(jwksURL); err == nil {
-			s.OpenID.JWKS.URL = u
-			logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
+		u, err := xnet.ParseURL(jwksURL)
+		if err != nil {
+			logger.FatalIf(err, "Unable to parse MINIO_IAM_JWKS_URL %s", jwksURL)
		}
+		s.OpenID.JWKS.URL = u
	}

	if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
-		if u, err := xnet.ParseURL(opaURL); err == nil {
-			s.Policy.OPA.URL = u
-			s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
+		u, err := xnet.ParseURL(opaURL)
+		if err != nil {
+			logger.FatalIf(err, "Unable to parse MINIO_IAM_OPA_URL %s", opaURL)
		}
+		opaArgs := iampolicy.OpaArgs{
+			URL:         u,
+			AuthToken:   os.Getenv("MINIO_IAM_OPA_AUTHTOKEN"),
+			Transport:   NewCustomHTTPTransport(),
+			CloseRespFn: xhttp.DrainBody,
+		}
+		logger.FatalIf(opaArgs.Validate(), "Unable to reach MINIO_IAM_OPA_URL %s", opaURL)
+		s.Policy.OPA.URL = opaArgs.URL
+		s.Policy.OPA.AuthToken = opaArgs.AuthToken
	}

+	var err error
+	s.LDAPServerConfig, err = newLDAPConfigFromEnv(globalRootCAs)
+	if err != nil {
+		logger.FatalIf(err, "Unable to parse LDAP configuration from env")
+	}
}

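The shape of this rewrite is worth noting: previously a malformed MINIO_IAM_JWKS_URL or MINIO_IAM_OPA_URL was silently ignored because only the `err == nil` branch of ParseURL was handled; now a bad value is fatal at startup. A standalone sketch of the fail-fast pattern (standard library only; `fatalIf` and all names here are illustrative, not the project's code):

```go
package main

import (
	"log"
	"net/url"
	"os"
)

// fatalIf aborts the process when err is non-nil, mirroring logger.FatalIf.
func fatalIf(err error, msg string, args ...interface{}) {
	if err != nil {
		log.Fatalf(msg+": %v", append(args, err)...)
	}
}

func main() {
	if raw, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
		// Old style: `if u, err := url.Parse(raw); err == nil { ... }`
		// silently dropped the configuration on a typo. New style fails loudly:
		u, err := url.Parse(raw)
		fatalIf(err, "Unable to parse MINIO_IAM_OPA_URL %s", raw)
		log.Printf("OPA endpoint configured: %s", u)
	}
}
```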
@@ -303,7 +319,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewAMQPTarget(k, v)
+		t, err := target.NewAMQPTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("amqp(%s): %s", k, err.Error())
		}

@@ -314,7 +330,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewElasticsearchTarget(k, v)
+		t, err := target.NewElasticsearchTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("elasticsearch(%s): %s", k, err.Error())
		}

@@ -325,6 +341,9 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
+		if v.TLS.Enable {
+			v.TLS.RootCAs = globalRootCAs
+		}
		t, err := target.NewKafkaTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("kafka(%s): %s", k, err.Error())

@@ -336,6 +355,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
+		v.RootCAs = globalRootCAs
		t, err := target.NewMQTTTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("mqtt(%s): %s", k, err.Error())

@@ -347,7 +367,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewMySQLTarget(k, v)
+		t, err := target.NewMySQLTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("mysql(%s): %s", k, err.Error())
		}

@@ -358,7 +378,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewNATSTarget(k, v)
+		t, err := target.NewNATSTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("nats(%s): %s", k, err.Error())
		}

@@ -369,7 +389,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewNSQTarget(k, v)
+		t, err := target.NewNSQTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("nsq(%s): %s", k, err.Error())
		}

@@ -380,7 +400,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewPostgreSQLTarget(k, v)
+		t, err := target.NewPostgreSQLTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
		}

@@ -391,7 +411,7 @@ func (s *serverConfig) TestNotificationTargets() error {
		if !v.Enable {
			continue
		}
-		t, err := target.NewRedisTarget(k, v)
+		t, err := target.NewRedisTarget(k, v, GlobalServiceDoneCh)
		if err != nil {
			return fmt.Errorf("redis(%s): %s", k, err.Error())
		}
|
||||
globalCacheMaxUse = cacheConf.MaxUse
|
||||
}
|
||||
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
|
||||
logger.FatalIf(err, "Unable to setup the KMS")
|
||||
logger.FatalIf(err, "Unable to setup the KMS %s", s.KMS.Vault.Endpoint)
|
||||
}
|
||||
|
||||
if !globalIsCompressionEnabled {
|
||||
@@ -546,15 +566,22 @@ func (s *serverConfig) loadToCachedConfigs() {
|
||||
globalIsCompressionEnabled = compressionConf.Enabled
|
||||
}
|
||||
|
||||
globalIAMValidators = getAuthValidators(s)
|
||||
if s.OpenID.JWKS.URL != nil && s.OpenID.JWKS.URL.String() != "" {
|
||||
logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(),
|
||||
"Unable to populate public key from JWKS URL %s", s.OpenID.JWKS.URL)
|
||||
}
|
||||
|
||||
globalIAMValidators = getOpenIDValidators(s)
|
||||
|
||||
if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
|
||||
globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
|
||||
opaArgs := iampolicy.OpaArgs{
|
||||
URL: s.Policy.OPA.URL,
|
||||
AuthToken: s.Policy.OPA.AuthToken,
|
||||
Transport: NewCustomHTTPTransport(),
|
||||
CloseRespFn: xhttp.DrainBody,
|
||||
})
|
||||
}
|
||||
logger.FatalIf(opaArgs.Validate(), "Unable to reach OPA URL %s", s.Policy.OPA.URL)
|
||||
globalPolicyOPA = iampolicy.NewOpa(opaArgs)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -611,15 +638,15 @@ func loadConfig(objAPI ObjectLayer) error {
	return nil
}

-// getAuthValidators - returns ValidatorList which contains
+// getOpenIDValidators - returns ValidatorList which contains
// enabled providers in server config.
// A new authentication provider is added like below
-// * Add a new provider in pkg/iam/validator package.
-func getAuthValidators(config *serverConfig) *validator.Validators {
-	validators := validator.NewValidators()
+// * Add a new provider in pkg/iam/openid package.
+func getOpenIDValidators(config *serverConfig) *openid.Validators {
+	validators := openid.NewValidators()

	if config.OpenID.JWKS.URL != nil {
-		validators.Add(validator.NewJWT(config.OpenID.JWKS))
+		validators.Add(openid.NewJWT(config.OpenID.JWKS))
	}

	return validators
@@ -637,7 +664,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
	}
	for id, args := range config.Notify.AMQP {
		if args.Enable {
-			newTarget, err := target.NewAMQPTarget(id, args)
+			newTarget, err := target.NewAMQPTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -651,7 +678,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Elasticsearch {
		if args.Enable {
-			newTarget, err := target.NewElasticsearchTarget(id, args)
+			newTarget, err := target.NewElasticsearchTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -667,6 +694,9 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Kafka {
		if args.Enable {
+			if args.TLS.Enable {
+				args.TLS.RootCAs = globalRootCAs
+			}
			newTarget, err := target.NewKafkaTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)

@@ -696,7 +726,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.MySQL {
		if args.Enable {
-			newTarget, err := target.NewMySQLTarget(id, args)
+			newTarget, err := target.NewMySQLTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -710,7 +740,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.NATS {
		if args.Enable {
-			newTarget, err := target.NewNATSTarget(id, args)
+			newTarget, err := target.NewNATSTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -724,7 +754,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.NSQ {
		if args.Enable {
-			newTarget, err := target.NewNSQTarget(id, args)
+			newTarget, err := target.NewNSQTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -738,7 +768,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.PostgreSQL {
		if args.Enable {
-			newTarget, err := target.NewPostgreSQLTarget(id, args)
+			newTarget, err := target.NewPostgreSQLTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -752,7 +782,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Redis {
		if args.Enable {
-			newTarget, err := target.NewRedisTarget(id, args)
+			newTarget, err := target.NewRedisTarget(id, args, GlobalServiceDoneCh)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -767,7 +797,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
	for id, args := range config.Notify.Webhook {
		if args.Enable {
			args.RootCAs = globalRootCAs
-			newTarget := target.NewWebhookTarget(id, args)
+			newTarget := target.NewWebhookTarget(id, args, GlobalServiceDoneCh)
			if err := targetList.Add(newTarget); err != nil {
				logger.LogIf(context.Background(), err)
				continue

@@ -185,10 +185,10 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},

		// Test 11 - Test AMQP
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false, "queueDir": "", "queueLimit": 0}}}}`, false},

		// Test 12 - Test NATS
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "queueDir": "", "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},

		// Test 13 - Test ElasticSearch
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},

@@ -197,16 +197,16 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},

		// Test 15 - Test PostgreSQL
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},

		// Test 16 - Test Kafka
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false},

		// Test 17 - Test Webhook
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "", "queueDir": "", "queueLimit": 0} }}}`, false},

		// Test 18 - Test MySQL
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},

		// Test 19 - Test Format for MySQL
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},

@@ -224,10 +224,10 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},

		// Test 24 - Test valid Format for ElasticSearch
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex", "queueDir": "", "queueLimit": 0 } }}}`, true},

		// Test 25 - Test Format for Redis
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1", "queueDir": "", "queueLimit": 0 } }}}`, false},

		// Test 26 - Test valid Format for Redis
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},

@@ -236,7 +236,7 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": "", "queueLimit": 0}}}}`, false},

		// Test 28 - Test NSQ
-		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
+		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": "", "queueDir": "", "queueLimit": 0} }}}`, false},
	}

	for i, testCase := range testCases {

@@ -29,8 +29,8 @@ import (
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
+	"github.com/minio/minio/pkg/iam/openid"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
-	"github.com/minio/minio/pkg/iam/validator"
	xnet "github.com/minio/minio/pkg/net"
	"github.com/minio/minio/pkg/quick"
)
@@ -2426,7 +2426,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
	defer func() {
		if err == nil {
			if globalEtcdClient != nil {
-				deleteConfigEtcd(context.Background(), globalEtcdClient, configFile)
+				deleteKeyEtcd(context.Background(), globalEtcdClient, configFile)
			} else {
				// Rename config.json to config.json.deprecated only upon
				// success of this function.

@@ -2440,7 +2440,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
	// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
	// and configFile, take a transaction lock to avoid data race between readConfig()
	// and saveConfig().
-	objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
+	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigFile)
	if err = objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}

@@ -2492,7 +2492,7 @@ func migrateMinioSysConfig(objAPI ObjectLayer) error {
	// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
	// and configFile, take a transaction lock to avoid data race between readConfig()
	// and saveConfig().
-	objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
+	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigFile)
	if err := objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}

@@ -2654,7 +2654,7 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
	}

	cfg.Version = "31"
-	cfg.OpenID.JWKS = validator.JWKSArgs{
+	cfg.OpenID.JWKS = openid.JWKSArgs{
		URL: &xnet.URL{},
	}
	cfg.Policy.OPA = iampolicy.OpaArgs{

@@ -175,7 +175,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
	}
	defer os.RemoveAll(fsDir)

-	configPath := rootPath + "/" + minioConfigFile
+	configPath := rootPath + SlashSeparator + minioConfigFile

	// Create a corrupted config file
	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {

@@ -238,7 +238,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
	defer os.RemoveAll(rootPath)

	globalConfigDir = &ConfigDir{path: rootPath}
-	configPath := rootPath + "/" + minioConfigFile
+	configPath := rootPath + SlashSeparator + minioConfigFile

	// Create a corrupted config file
	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {

@@ -335,7 +335,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
	defer os.RemoveAll(rootPath)

	globalConfigDir = &ConfigDir{path: rootPath}
-	configPath := rootPath + "/" + minioConfigFile
+	configPath := rootPath + SlashSeparator + minioConfigFile

	for i := 3; i <= 17; i++ {
		// Create a corrupted config file

@@ -22,8 +22,8 @@ import (
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event/target"
-	"github.com/minio/minio/pkg/iam/policy"
-	"github.com/minio/minio/pkg/iam/validator"
+	"github.com/minio/minio/pkg/iam/openid"
+	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/quick"
)

@@ -817,7 +817,7 @@ type serverConfigV31 struct {
	// OpenID configuration
	OpenID struct {
		// JWKS validator config.
-		JWKS validator.JWKSArgs `json:"jwks"`
+		JWKS openid.JWKSArgs `json:"jwks"`
	} `json:"openid"`

	// External policy enforcements.

@@ -872,7 +872,7 @@ type serverConfigV32 struct {
	// OpenID configuration
	OpenID struct {
		// JWKS validator config.
-		JWKS validator.JWKSArgs `json:"jwks"`
+		JWKS openid.JWKSArgs `json:"jwks"`
	} `json:"openid"`

	// External policy enforcements.

@@ -884,7 +884,7 @@ type serverConfigV32 struct {
	} `json:"policy"`
}

-// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in MQTT and kafka.
+// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in all notification targets.
type serverConfigV33 struct {
	quick.Config `json:"-"` // ignore interfaces

@@ -916,7 +916,7 @@ type serverConfigV33 struct {
	// OpenID configuration
	OpenID struct {
		// JWKS validator config.
-		JWKS validator.JWKSArgs `json:"jwks"`
+		JWKS openid.JWKSArgs `json:"jwks"`
	} `json:"openid"`

	// External policy enforcements.

@@ -926,4 +926,6 @@ type serverConfigV33 struct {

		// Add new external policy enforcements here.
	} `json:"policy"`
+
+	LDAPServerConfig ldapServerConfig `json:"ldapserverconfig"`
}

@@ -20,9 +20,11 @@ import (
	"bytes"
	"context"
	"encoding/json"
+	"fmt"
	"path"
	"runtime"
	"strings"
+	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/quick"

@@ -101,6 +103,25 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
	return sys.Init(objAPI)
}

+// WatchConfigNASDisk - watches the NAS disk on a periodic basis.
+func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) {
+	configInterval := globalRefreshIAMInterval
+	watchDisk := func() {
+		ticker := time.NewTicker(configInterval)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-GlobalServiceDoneCh:
+				return
+			case <-ticker.C:
+				loadConfig(objAPI)
+			}
+		}
+	}
+	// Refresh configSys in background for NAS gateway.
+	go watchDisk()
+}
+
// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
	if objAPI == nil {

@@ -116,18 +137,23 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
	// of the object layer.
	// - Write quorum not met when upgrading configuration
	//   version is needed.
-	for range newRetryTimerSimple(doneCh) {
-		if err := initConfig(objAPI); err != nil {
-			if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-				strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-				logger.Info("Waiting for configuration to be initialized..")
-				continue
+	retryTimerCh := newRetryTimerSimple(doneCh)
+	for {
+		select {
+		case <-retryTimerCh:
+			if err := initConfig(objAPI); err != nil {
+				if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
+					strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
+					logger.Info("Waiting for configuration to be initialized..")
+					continue
+				}
+				return err
			}
-			return err
+			return nil
+		case <-globalOSSignalCh:
+			return fmt.Errorf("Initializing config sub-system gracefully stopped")
		}
-		break
	}
-	return nil
}

// NewConfigSys - creates new config system object.
||||
133	cmd/consolelogger.go (new file)
@@ -0,0 +1,133 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	ring "container/ring"
	"context"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/message/log"
	"github.com/minio/minio/cmd/logger/target/console"
	"github.com/minio/minio/pkg/madmin"
	xnet "github.com/minio/minio/pkg/net"
	"github.com/minio/minio/pkg/pubsub"
)

// number of log messages to buffer
const defaultLogBufferCount = 10000

// HTTPConsoleLoggerSys holds global console logger state
type HTTPConsoleLoggerSys struct {
	pubsub   *pubsub.PubSub
	console  *console.Target
	nodeName string
	logBuf   *ring.Ring
}

// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context, endpoints EndpointList) *HTTPConsoleLoggerSys {
	host, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
	if err != nil {
		logger.FatalIf(err, "Unable to start console logging subsystem")
	}
	var nodeName string
	if globalIsDistXL {
		nodeName = host.Name
	}
	ps := pubsub.New()
	return &HTTPConsoleLoggerSys{
		ps, nil, nodeName, ring.New(defaultLogBufferCount),
	}
}

// HasLogListeners returns true if console log listeners are registered
// for this node or peers
func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {
	return sys != nil && sys.pubsub.HasSubscribers()
}

// Subscribe starts console logging for this node.
func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan struct{}, node string, last int, filter func(entry interface{}) bool) {
	// Enable console logging for remote client even if local console logging is disabled in the config.
	if !globalServerConfig.Logger.Console.Enabled && !sys.pubsub.HasSubscribers() {
		logger.AddTarget(globalConsoleSys.Console())
	}

	cnt := 0
	// by default send all console logs in the ring buffer unless node or limit query parameters
	// are set.
	var lastN []madmin.LogInfo
	if last > defaultLogBufferCount || last <= 0 {
		last = defaultLogBufferCount
	}

	lastN = make([]madmin.LogInfo, last)
	r := sys.logBuf
	r.Do(func(p interface{}) {
		if p != nil && (p.(madmin.LogInfo)).SendLog(node) {
			lastN[cnt%last] = p.(madmin.LogInfo)
			cnt++
		}
	})
	// send last n console log messages in order filtered by node
	if cnt > 0 {
		for i := 0; i < last; i++ {
			entry := lastN[(cnt+i)%last]
			if (entry == madmin.LogInfo{}) {
				continue
			}
			select {
			case subCh <- entry:
			case <-doneCh:
				return
			}
		}
	}
	sys.pubsub.Subscribe(subCh, doneCh, filter)
}

// Console returns a console target
func (sys *HTTPConsoleLoggerSys) Console() *HTTPConsoleLoggerSys {
	if sys.console == nil {
		sys.console = console.New()
	}
	return sys
}

// Send log message 'e' to console and publish to console
// log pubsub system
func (sys *HTTPConsoleLoggerSys) Send(e interface{}) error {
	var lg madmin.LogInfo
	switch e := e.(type) {
	case log.Entry:
		lg = madmin.LogInfo{Entry: e, NodeName: sys.nodeName}
	case string:
		lg = madmin.LogInfo{ConsoleMsg: e, NodeName: sys.nodeName}
	}

	sys.pubsub.Publish(lg)
	// add log to ring buffer
	sys.logBuf.Value = lg
	sys.logBuf = sys.logBuf.Next()

	if globalServerConfig.Logger.Console.Enabled {
		return sys.console.Send(e)
	}
	return nil
}
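Subscribe's replay logic is subtle: it copies matching entries from the ring into lastN with a wrapping index, then reads them back starting at cnt so the oldest surviving entry comes first. A self-contained sketch of that replay order (illustrative values, standard library only):

```go
package main

import (
	"container/ring"
	"fmt"
)

func main() {
	const size = 4 // small ring so overwrites are visible
	r := ring.New(size)

	// Write 6 entries into a 4-slot ring: entries 1 and 2 are overwritten.
	for i := 1; i <= 6; i++ {
		r.Value = i
		r = r.Next()
	}

	// Collect non-empty slots with a wrapping index, as Subscribe does.
	lastN := make([]interface{}, size)
	cnt := 0
	r.Do(func(p interface{}) {
		if p != nil {
			lastN[cnt%size] = p
			cnt++
		}
	})

	// Reading from (cnt+i)%size yields oldest-to-newest order: 3 4 5 6.
	for i := 0; i < size; i++ {
		if v := lastN[(cnt+i)%size]; v != nil {
			fmt.Print(v, " ")
		}
	}
	fmt.Println()
}
```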
@@ -19,9 +19,9 @@ import "errors"
// Error is the generic type for any error happening during decrypting
// an object. It indicates that the object itself or its metadata was
// modified accidentally or maliciously.
-type Error struct{ msg string }
+type Error string

-func (e Error) Error() string { return e.msg }
+func (e Error) Error() string { return string(e) }

var (
	// ErrInvalidEncryptionMethod indicates that the specified SSE encryption method

@@ -56,12 +56,14 @@
	ErrIncompatibleEncryptionMethod = errors.New("Server side encryption specified with both SSE-C and SSE-S3 headers")
)

-var (
-	errMissingInternalIV            = Error{"The object metadata is missing the internal encryption IV"}
-	errMissingInternalSealAlgorithm = Error{"The object metadata is missing the internal seal algorithm"}
+const (
+	errMissingInternalIV            Error = "The object metadata is missing the internal encryption IV"
+	errMissingInternalSealAlgorithm Error = "The object metadata is missing the internal seal algorithm"

-	errInvalidInternalIV            = Error{"The internal encryption IV is malformed"}
-	errInvalidInternalSealAlgorithm = Error{"The internal seal algorithm is invalid and not supported"}
+	errInvalidInternalIV            Error = "The internal encryption IV is malformed"
+	errInvalidInternalSealAlgorithm Error = "The internal seal algorithm is invalid and not supported"
+
+	errMissingUpdatedKey Error = "The key update returned no error but also no sealed key"
)

var (

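The motivation for `type Error string` is that Go constants must have a basic underlying type: a struct-based error can only live in a `var` block (mutable at runtime), while a string-based error type supports true compile-time constants and plain `==` comparison. A minimal sketch of the idiom:

```go
package main

import "fmt"

// Error is a string-based error type, as in the diff above.
type Error string

func (e Error) Error() string { return string(e) }

// Because Error's underlying type is string, sentinel errors can be
// declared const: immutable and usable in comparisons.
const errMissingIV Error = "The object metadata is missing the internal encryption IV"

func lookupIV(meta map[string]string) (string, error) {
	iv, ok := meta["iv"]
	if !ok {
		return "", errMissingIV
	}
	return iv, nil
}

func main() {
	_, err := lookupIV(map[string]string{})
	// Plain equality works for sentinel comparison.
	fmt.Println(err == errMissingIV) // true
}
```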
@@ -83,6 +83,13 @@ func RemoveSensitiveHeaders(h http.Header) {
	h.Del(SSECopyKey)
}

+// IsRequested returns true if the HTTP headers indicate
+// that any form of server-side encryption (SSE-C, SSE-S3 or SSE-KMS)
+// is requested.
+func IsRequested(h http.Header) bool {
+	return S3.IsRequested(h) || SSEC.IsRequested(h) || SSECopy.IsRequested(h) || S3KMS.IsRequested(h)
+}
+
// S3 represents AWS SSE-S3. It provides functionality to handle
// SSE-S3 requests.
var S3 = s3{}

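IsRequested gives callers one umbrella check before dispatching to the scheme-specific logic. A self-contained sketch of the same OR-of-checks shape (the header-based stand-ins below are illustrative; real callers use crypto.IsRequested and the scheme values from the diff):

```go
package main

import (
	"fmt"
	"net/http"
)

// Stand-ins for the per-scheme checks, keyed off the standard AWS headers.
func isSSEC(h http.Header) bool {
	return h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm") != ""
}
func isSSES3(h http.Header) bool  { return h.Get("X-Amz-Server-Side-Encryption") == "AES256" }
func isSSEKMS(h http.Header) bool { return h.Get("X-Amz-Server-Side-Encryption") == "aws:kms" }

// isRequested mirrors the umbrella check added in the diff: any scheme counts.
func isRequested(h http.Header) bool { return isSSEC(h) || isSSES3(h) || isSSEKMS(h) }

func main() {
	h := http.Header{}
	h.Set("X-Amz-Server-Side-Encryption", "aws:kms")
	fmt.Println(isRequested(h), isSSEKMS(h)) // true true
}
```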
@@ -20,6 +20,29 @@ import (
	"testing"
)

+func TestIsRequested(t *testing.T) {
+	for i, test := range kmsIsRequestedTests {
+		if got := IsRequested(test.Header) && S3KMS.IsRequested(test.Header); got != test.Expected {
+			t.Errorf("SSE-KMS: Test %d: Wanted %v but got %v", i, test.Expected, got)
+		}
+	}
+	for i, test := range s3IsRequestedTests {
+		if got := IsRequested(test.Header) && S3.IsRequested(test.Header); got != test.Expected {
+			t.Errorf("SSE-S3: Test %d: Wanted %v but got %v", i, test.Expected, got)
+		}
+	}
+	for i, test := range ssecIsRequestedTests {
+		if got := IsRequested(test.Header) && SSEC.IsRequested(test.Header); got != test.Expected {
+			t.Errorf("SSE-C: Test %d: Wanted %v but got %v", i, test.Expected, got)
+		}
+	}
+	for i, test := range ssecCopyIsRequestedTests {
+		if got := IsRequested(test.Header) && SSECopy.IsRequested(test.Header); got != test.Expected {
+			t.Errorf("SSE-C: Test %d: Wanted %v but got %v", i, test.Expected, got)
+		}
+	}
+}
+
var kmsIsRequestedTests = []struct {
	Header   http.Header
	Expected bool

@@ -108,7 +108,7 @@ func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucke
	)
	switch sealedKey.Algorithm {
	default:
-		return Error{fmt.Sprintf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)}
+		return Error(fmt.Sprintf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm))
	case SealAlgorithm:
		mac := hmac.New(sha256.New, extKey[:])
		mac.Write(sealedKey.IV[:])

@@ -86,6 +86,19 @@ type KMS interface {
	// referenced by the keyID. The provided context must
	// match the context used to generate the sealed key.
	UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error)

+	// UpdateKey re-wraps the sealedKey if the master key, referenced by
+	// `keyID`, has changed in the meantime. This usually happens when the
+	// KMS operator performs a key-rotation operation of the master key.
+	// UpdateKey fails if the provided sealedKey cannot be decrypted using
+	// the master key referenced by keyID.
+	//
+	// UpdateKey makes no guarantees whatsoever about whether the returned
+	// rotatedKey is actually different from the sealedKey. If nothing has
+	// changed at the KMS or if the KMS does not support updating generated
+	// keys this method may behave like a NOP and just return the sealedKey
+	// itself.
+	UpdateKey(keyID string, sealedKey []byte, context Context) (rotatedKey []byte, err error)
}

type masterKeyKMS struct {

@@ -126,6 +139,13 @@ func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context)
	return key, nil
}

+func (kms *masterKeyKMS) UpdateKey(keyID string, sealedKey []byte, ctx Context) ([]byte, error) {
+	if _, err := kms.UnsealKey(keyID, sealedKey, ctx); err != nil {
+		return nil, err
+	}
+	return sealedKey, nil // The master key cannot update data keys -> Do nothing.
+}
+
func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
	if context == nil {
		context = Context{}

@@ -51,11 +51,20 @@ func TestMasterKeyKMS(t *testing.T) {
			t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err)
		}
		if err == nil && test.ShouldFail {
-			t.Errorf("Test %d: KMS unsealed the generated successfully but should have failed", i)
+			t.Errorf("Test %d: KMS unsealed the generated key successfully but should have failed", i)
		}
		if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) {
			t.Errorf("Test %d: The generated and unsealed key differ", i)
		}

+		rotatedKey, err := kms.UpdateKey(test.UnsealKeyID, sealedKey, test.UnsealContext)
+		if err == nil && test.ShouldFail {
+			t.Errorf("Test %d: KMS updated the generated key successfully but should have failed", i)
+		}
+		if !test.ShouldFail && !bytes.Equal(rotatedKey, sealedKey[:]) {
+			t.Errorf("Test %d: The updated and sealed key differ", i)
+		}
	}
}

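The contract deliberately allows UpdateKey to be a validating no-op, which is exactly what masterKeyKMS does: prove the sealed key still decrypts, then hand it back unchanged. A standalone sketch of that minimal contract (a hypothetical toy KMS, with none of the real implementation's cryptography):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// toyKMS "wraps" data keys by XOR-ing with a fixed master byte; purely
// illustrative, not a real sealing scheme.
type toyKMS struct{ master byte }

func (k toyKMS) unseal(sealed []byte) ([]byte, error) {
	if len(sealed) == 0 {
		return nil, errors.New("empty sealed key")
	}
	out := make([]byte, len(sealed))
	for i, b := range sealed {
		out[i] = b ^ k.master
	}
	return out, nil
}

// updateKey follows the UpdateKey contract: fail if the sealed key cannot
// be decrypted, otherwise it may return the input unchanged (a no-op).
func (k toyKMS) updateKey(sealed []byte) ([]byte, error) {
	if _, err := k.unseal(sealed); err != nil {
		return nil, err
	}
	return sealed, nil
}

func main() {
	kms := toyKMS{master: 0x42}
	sealed := []byte{0x01, 0x02, 0x03}
	rotated, err := kms.updateKey(sealed)
	fmt.Println(err == nil, bytes.Equal(rotated, sealed)) // true true
}
```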
@@ -17,6 +17,7 @@ package crypto
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -119,28 +120,46 @@ func CreateMultipartMetadata(metadata map[string]string) map[string]string {
|
||||
return metadata
|
||||
}
|
||||
|
||||
// CreateMetadata encodes the keyID, the sealed kms data key and the sealed key
|
||||
// into the metadata and returns the modified metadata. It allocates a new
|
||||
// metadata map if metadata is nil.
|
||||
// CreateMetadata encodes the sealed object key into the metadata and returns
|
||||
// the modified metadata. If the keyID and the kmsKey is not empty it encodes
|
||||
// both into the metadata as well. It allocates a new metadata map if metadata
|
||||
// is nil.
|
||||
func (s3) CreateMetadata(metadata map[string]string, keyID string, kmsKey []byte, sealedKey SealedKey) map[string]string {
|
||||
if sealedKey.Algorithm != SealAlgorithm {
|
||||
logger.CriticalIf(context.Background(), fmt.Errorf("The seal algorithm '%s' is invalid for SSE-S3", sealedKey.Algorithm))
|
||||
}
|
||||
|
||||
// There are two possibilites:
|
||||
// - We use a KMS -> There must be non-empty key ID and a KMS data key.
|
||||
// - We use a K/V -> There must be no key ID and no KMS data key.
|
||||
// Otherwise, the caller has passed an invalid argument combination.
|
||||
if keyID == "" && len(kmsKey) != 0 {
|
||||
logger.CriticalIf(context.Background(), errors.New("The key ID must not be empty if a KMS data key is present"))
|
||||
}
|
||||
if keyID != "" && len(kmsKey) == 0 {
|
||||
logger.CriticalIf(context.Background(), errors.New("The KMS data key must not be empty if a key ID is present"))
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
metadata = map[string]string{}
|
||||
}
|
||||
metadata[S3KMSKeyID] = keyID
|
||||
|
||||
metadata[SSESealAlgorithm] = sealedKey.Algorithm
|
||||
metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
|
||||
metadata[S3SealedKey] = base64.StdEncoding.EncodeToString(sealedKey.Key[:])
|
||||
metadata[S3KMSSealedKey] = base64.StdEncoding.EncodeToString(kmsKey)
|
||||
if len(kmsKey) > 0 && keyID != "" { // We use a KMS -> Store key ID and sealed KMS data key.
|
||||
metadata[S3KMSKeyID] = keyID
|
||||
metadata[S3KMSSealedKey] = base64.StdEncoding.EncodeToString(kmsKey)
|
||||
}
|
||||
return metadata
|
||||
}
|
||||
|
||||
// ParseMetadata extracts all SSE-S3 related values from the object metadata
|
||||
// and checks whether they are well-formed. It returns the KMS key-ID, the
|
||||
// sealed KMS key and the sealed object key on success.
|
||||
// and checks whether they are well-formed. It returns the sealed object key
|
||||
// on success. If the metadata contains both, a KMS master key ID and a sealed
|
||||
// KMS data key it returns both. If the metadata does not contain neither a
|
||||
// KMS master key ID nor a sealed KMS data key it returns an empty keyID and
|
||||
// KMS data key. Otherwise, it returns an error.
|
||||
func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, err error) {
|
||||
// Extract all required values from object metadata
|
||||
b64IV, ok := metadata[SSEIV]
|
||||
@@ -153,15 +172,20 @@ func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte
|
||||
}
|
||||
b64SealedKey, ok := metadata[S3SealedKey]
|
||||
if !ok {
|
||||
return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal sealed key for SSE-S3"}
|
||||
return keyID, kmsKey, sealedKey, Error("The object metadata is missing the internal sealed key for SSE-S3")
|
||||
}
|
||||
keyID, ok = metadata[S3KMSKeyID]
|
||||
if !ok {
|
||||
return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal KMS key-ID for SSE-S3"}
|
||||
|
||||
// There are two possibilities:
|
||||
// - We use a KMS -> There must be a key ID and a KMS data key.
|
||||
// - We use a K/V -> There must be no key ID and no KMS data key.
|
||||
// Otherwise, the metadata is corrupted.
|
||||
keyID, idPresent := metadata[S3KMSKeyID]
|
||||
b64KMSSealedKey, kmsKeyPresent := metadata[S3KMSSealedKey]
|
||||
if !idPresent && kmsKeyPresent {
|
||||
return keyID, kmsKey, sealedKey, Error("The object metadata is missing the internal KMS key-ID for SSE-S3")
|
||||
}
|
||||
b64KMSSealedKey, ok := metadata[S3KMSSealedKey]
|
||||
if !ok {
|
||||
return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal sealed KMS data key for SSE-S3"}
|
||||
if idPresent && !kmsKeyPresent {
|
||||
return keyID, kmsKey, sealedKey, Error("The object metadata is missing the internal sealed KMS data key for SSE-S3")
|
||||
}
|
||||
|
||||
// Check whether all extracted values are well-formed
|
||||
@@ -174,11 +198,13 @@ func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte
|
||||
}
|
||||
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
|
||||
if err != nil || len(encryptedKey) != 64 {
|
||||
return keyID, kmsKey, sealedKey, Error{"The internal sealed key for SSE-S3 is invalid"}
|
||||
return keyID, kmsKey, sealedKey, Error("The internal sealed key for SSE-S3 is invalid")
|
||||
}
|
||||
kmsKey, err = base64.StdEncoding.DecodeString(b64KMSSealedKey)
|
||||
if err != nil {
|
||||
return keyID, kmsKey, sealedKey, Error{"The internal sealed KMS data key for SSE-S3 is invalid"}
|
||||
if idPresent && kmsKeyPresent { // We are using a KMS -> parse the sealed KMS data key.
|
||||
kmsKey, err = base64.StdEncoding.DecodeString(b64KMSSealedKey)
|
||||
if err != nil {
|
||||
return keyID, kmsKey, sealedKey, Error("The internal sealed KMS data key for SSE-S3 is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
sealedKey.Algorithm = algorithm
|
||||
@@ -218,7 +244,7 @@ func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err
|
||||
}
|
||||
b64SealedKey, ok := metadata[SSECSealedKey]
|
||||
if !ok {
|
||||
return sealedKey, Error{"The object metadata is missing the internal sealed key for SSE-C"}
|
||||
return sealedKey, Error("The object metadata is missing the internal sealed key for SSE-C")
|
||||
}
|
||||
|
||||
// Check whether all extracted values are well-formed
|
||||
@@ -231,7 +257,7 @@ func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err
|
||||
}
|
||||
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
|
||||
if err != nil || len(encryptedKey) != 64 {
|
||||
return sealedKey, Error{"The internal sealed key for SSE-C is invalid"}
|
||||
return sealedKey, Error("The internal sealed key for SSE-C is invalid")
|
||||
}
|
||||
|
||||
sealedKey.Algorithm = algorithm
|
||||
|
||||
@@ -125,15 +125,15 @@ var s3ParseMetadataTests = []struct {
|
||||
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
}, // 1
|
||||
{
|
||||
ExpectedErr: Error{"The object metadata is missing the internal sealed key for SSE-S3"},
|
||||
ExpectedErr: Error("The object metadata is missing the internal sealed key for SSE-S3"),
|
||||
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: ""}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
}, // 2
|
||||
{
|
||||
ExpectedErr: Error{"The object metadata is missing the internal KMS key-ID for SSE-S3"},
|
||||
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: ""}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
ExpectedErr: Error("The object metadata is missing the internal KMS key-ID for SSE-S3"),
|
||||
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: "", S3KMSSealedKey: "IAAF0b=="}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
}, // 3
|
||||
{
|
||||
ExpectedErr: Error{"The object metadata is missing the internal sealed KMS data key for SSE-S3"},
|
||||
ExpectedErr: Error("The object metadata is missing the internal sealed KMS data key for SSE-S3"),
|
||||
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: ""},
|
||||
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
}, // 4
|
||||
@@ -150,7 +150,7 @@ var s3ParseMetadataTests = []struct {
|
||||
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
}, // 6
|
||||
{
|
||||
ExpectedErr: Error{"The internal sealed key for SSE-S3 is invalid"},
|
||||
ExpectedErr: Error("The internal sealed key for SSE-S3 is invalid"),
|
||||
Metadata: map[string]string{
|
||||
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm, S3SealedKey: "",
|
||||
S3KMSKeyID: "", S3KMSSealedKey: "",
|
||||
@@ -158,7 +158,7 @@ var s3ParseMetadataTests = []struct {
|
||||
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
|
||||
}, // 7
|
||||
{
|
||||
ExpectedErr: Error{"The internal sealed KMS data key for SSE-S3 is invalid"},
|
||||
ExpectedErr: Error("The internal sealed KMS data key for SSE-S3 is invalid"),
|
||||
Metadata: map[string]string{
|
||||
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
|
||||
S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)), S3KMSKeyID: "key-1",
|
||||
@@ -218,7 +218,7 @@ var ssecParseMetadataTests = []struct {
|
||||
{ExpectedErr: errMissingInternalIV, Metadata: map[string]string{}, SealedKey: SealedKey{}}, // 0
|
||||
{ExpectedErr: errMissingInternalSealAlgorithm, Metadata: map[string]string{SSEIV: ""}, SealedKey: SealedKey{}}, // 1
|
||||
{
|
||||
ExpectedErr: Error{"The object metadata is missing the internal sealed key for SSE-C"},
|
||||
ExpectedErr: Error("The object metadata is missing the internal sealed key for SSE-C"),
|
||||
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: ""}, SealedKey: SealedKey{},
|
||||
}, // 2
|
||||
{
|
||||
@@ -233,7 +233,7 @@ var ssecParseMetadataTests = []struct {
|
||||
SealedKey: SealedKey{},
|
||||
}, // 4
|
||||
{
|
||||
ExpectedErr: Error{"The internal sealed key for SSE-C is invalid"},
|
||||
ExpectedErr: Error("The internal sealed key for SSE-C is invalid"),
|
||||
Metadata: map[string]string{
|
||||
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm, SSECSealedKey: "",
|
||||
},
|
||||
@@ -287,7 +287,9 @@ var s3CreateMetadataTests = []struct {
|
||||
SealedDataKey []byte
|
||||
SealedKey SealedKey
|
||||
}{
|
||||
{KeyID: "", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
|
||||
|
||||
{KeyID: "", SealedDataKey: nil, SealedKey: SealedKey{Algorithm: SealAlgorithm}},
|
||||
{KeyID: "my-minio-key", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
|
||||
{KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
|
||||
{KeyID: "deadbeef", SealedDataKey: make([]byte, 32), SealedKey: SealedKey{IV: [32]byte{0xf7}, Key: [64]byte{0xea}, Algorithm: SealAlgorithm}},
|
||||
}
|
||||
|
||||
@@ -250,3 +250,30 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
|
||||
copy(key[:], []byte(plainKey))
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
|
||||
// has been changed by the KMS operator - i.e. the master key has been rotated.
|
||||
// If the master key hasn't changed since the sealedKey was created / updated
|
||||
// it may return the same sealedKey as rotatedKey.
|
||||
//
|
||||
// The context must be the same context as the one provided while
|
||||
// generating the plaintext key / sealedKey.
|
||||
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"ciphertext": string(sealedKey),
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
}
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ciphertext, ok := s.Data["ciphertext"]
|
||||
if !ok {
|
||||
return nil, errMissingUpdatedKey
|
||||
}
|
||||
rotatedKey = []byte(ciphertext.(string))
|
||||
return rotatedKey, nil
|
||||
}
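// Illustrative usage, not from this change - a caller re-wraps a sealed key
// against the current master key version and persists the result:
//
//	rotatedKey, err := kms.UpdateKey(keyID, sealedKey, kmsContext)
//	if err != nil {
//		return err
//	}
//	// Replace the stored sealedKey with rotatedKey.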
|
||||
|
||||
@@ -39,7 +39,7 @@ func newBgHealSequence(numDisks int) *healSequence {
|
||||
hs := madmin.HealOpts{
|
||||
// Remove objects that do not have read-quorum
|
||||
Remove: true,
|
||||
ScanMode: madmin.HealDeepScan,
|
||||
ScanMode: madmin.HealNormalScan,
|
||||
}
|
||||
|
||||
return &healSequence{
|
||||
|
||||
164
cmd/daily-lifecycle-ops.go
Normal file
@@ -0,0 +1,164 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/lifecycle"
|
||||
)
|
||||
|
||||
const (
|
||||
bgLifecycleInterval = 24 * time.Hour
|
||||
bgLifecycleTick = time.Hour
|
||||
)
|
||||
|
||||
type lifecycleOps struct {
|
||||
LastActivity time.Time
|
||||
}
|
||||
|
||||
// Tracks the last daily lifecycle activity for this node
|
||||
var globalLifecycleOps = &lifecycleOps{}
|
||||
|
||||
func getLocalBgLifecycleOpsStatus() BgLifecycleOpsStatus {
|
||||
return BgLifecycleOpsStatus{
|
||||
LastActivity: globalLifecycleOps.LastActivity,
|
||||
}
|
||||
}
|
||||
|
||||
// initDailyLifecycle starts the routine that receives the daily
|
||||
// listing of all objects and applies any matching bucket lifecycle
|
||||
// rules.
|
||||
func initDailyLifecycle() {
|
||||
go startDailyLifecycle()
|
||||
}
|
||||
|
||||
func startDailyLifecycle() {
|
||||
var objAPI ObjectLayer
|
||||
var ctx = context.Background()
|
||||
|
||||
// Wait until the object API is ready
|
||||
for {
|
||||
objAPI = newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Calculate the time of the last lifecycle operation across all peer nodes of the cluster
|
||||
computeLastLifecycleActivity := func(status []BgOpsStatus) time.Time {
|
||||
var lastAct time.Time
|
||||
for _, st := range status {
|
||||
if st.LifecycleOps.LastActivity.After(lastAct) {
|
||||
lastAct = st.LifecycleOps.LastActivity
|
||||
}
|
||||
}
|
||||
return lastAct
|
||||
}
|
||||
|
||||
for {
|
||||
// Check if we should perform lifecycle ops based on the last lifecycle activity; otherwise sleep for an hour
|
||||
allLifecycleStatus := []BgOpsStatus{
|
||||
{LifecycleOps: getLocalBgLifecycleOpsStatus()},
|
||||
}
|
||||
if globalIsDistXL {
|
||||
allLifecycleStatus = append(allLifecycleStatus, globalNotificationSys.BackgroundOpsStatus()...)
|
||||
}
|
||||
lastAct := computeLastLifecycleActivity(allLifecycleStatus)
|
||||
if !lastAct.IsZero() && time.Since(lastAct) < bgLifecycleInterval {
|
||||
time.Sleep(bgLifecycleTick)
|
||||
}
|
||||
|
||||
// Perform one lifecycle operation
|
||||
err := lifecycleRound(ctx, objAPI)
|
||||
switch err.(type) {
|
||||
// Unable to hold a lock means there is another
|
||||
// instance doing the lifecycle round
|
||||
case OperationTimedOut:
|
||||
time.Sleep(bgLifecycleTick)
|
||||
default:
|
||||
logger.LogIf(ctx, err)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
var lifecycleTimeout = newDynamicTimeout(60*time.Second, time.Second)
|
||||
|
||||
func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
|
||||
// Lock to avoid concurrent lifecycle ops from other nodes
|
||||
sweepLock := globalNSMutex.NewNSLock(ctx, "system", "daily-lifecycle-ops")
|
||||
if err := sweepLock.GetLock(lifecycleTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer sweepLock.Unlock()
|
||||
|
||||
buckets, err := objAPI.ListBuckets(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
// Check if the current bucket has a configured lifecycle policy, skip otherwise
|
||||
l, ok := globalLifecycleSys.Get(bucket.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Calculate the common prefix of all lifecycle rules
|
||||
var prefixes []string
|
||||
for _, rule := range l.Rules {
|
||||
prefixes = append(prefixes, rule.Filter.Prefix)
|
||||
}
|
||||
commonPrefix := lcp(prefixes)
|
||||
|
||||
// List all objects and calculate lifecycle action based on object name & object modtime
|
||||
marker := ""
|
||||
for {
|
||||
res, err := objAPI.ListObjects(ctx, bucket.Name, commonPrefix, marker, "", 1000)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var objects []string
|
||||
for _, obj := range res.Objects {
|
||||
// Find the action that needs to be executed
|
||||
action := l.ComputeAction(obj.Name, obj.ModTime)
|
||||
switch action {
|
||||
case lifecycle.DeleteAction:
|
||||
objects = append(objects, obj.Name)
|
||||
default:
|
||||
// Do nothing, for now.
|
||||
}
|
||||
}
|
||||
// Deletes a list of objects.
|
||||
objAPI.DeleteObjects(ctx, bucket.Name, objects)
|
||||
if !res.IsTruncated {
|
||||
// We are done here, proceed to next bucket.
|
||||
break
|
||||
}
|
||||
marker = res.NextMarker
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
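// lcp is called in lifecycleRound above but its definition is not part of
// this diff; a minimal longest-common-prefix sketch under that assumption
// (requires the "strings" import):
func lcp(strs []string) string {
	if len(strs) == 0 {
		return ""
	}
	prefix := strs[0]
	for _, s := range strs[1:] {
		// Shrink the candidate prefix until it prefixes this string too.
		for !strings.HasPrefix(s, prefix) {
			prefix = prefix[:len(prefix)-1]
			if prefix == "" {
				return ""
			}
		}
	}
	return prefix
}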
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -48,15 +49,14 @@ func copyDailySweepListeners() []chan string {
|
||||
return listenersCopy
|
||||
}
|
||||
|
||||
var sweepTimeout = newDynamicTimeout(60*time.Second, time.Second)
|
||||
|
||||
// sweepRound lists all objects, whether they have read quorum or not, and
// feeds them to all listeners, such as the background healer
|
||||
func sweepRound(ctx context.Context, objAPI ObjectLayer) error {
|
||||
zeroDuration := time.Millisecond
|
||||
zeroDynamicTimeout := newDynamicTimeout(zeroDuration, zeroDuration)
|
||||
|
||||
// General lock so we avoid parallel daily sweep by different instances.
|
||||
sweepLock := globalNSMutex.NewNSLock("system", "daily-sweep")
|
||||
if err := sweepLock.GetLock(zeroDynamicTimeout); err != nil {
|
||||
sweepLock := globalNSMutex.NewNSLock(ctx, "system", "daily-sweep")
|
||||
if err := sweepLock.GetLock(sweepTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer sweepLock.Unlock()
|
||||
@@ -76,10 +76,22 @@ func sweepRound(ctx context.Context, objAPI ObjectLayer) error {
|
||||
|
||||
marker := ""
|
||||
for {
|
||||
if globalHTTPServer != nil {
|
||||
// Wait at most 10 minutes for in-progress requests before proceeding to heal
|
||||
waitCount := 600
|
||||
// Any requests in progress, delay the heal.
|
||||
for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
|
||||
waitCount > 0 {
|
||||
waitCount--
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
res, err := objAPI.ListObjectsHeal(ctx, bucket.Name, "", marker, "", 1000)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, obj := range res.Objects {
|
||||
for _, l := range copyDailySweepListeners() {
|
||||
l <- pathJoin(bucket.Name, obj.Name)
|
||||
@@ -119,9 +131,11 @@ func dailySweeper() {
|
||||
break
|
||||
}
|
||||
|
||||
// Perform a sweep round each 24 hours
|
||||
// Start with random sleep time, so as to avoid "synchronous checks" between servers
|
||||
time.Sleep(time.Duration(rand.Float64() * float64(time.Hour)))
|
||||
|
||||
for {
|
||||
if time.Since(lastSweepTime) < 24*time.Hour {
|
||||
if time.Since(lastSweepTime) < 30*24*time.Hour {
|
||||
time.Sleep(time.Hour)
|
||||
continue
|
||||
}
|
||||
@@ -133,13 +147,12 @@ func dailySweeper() {
|
||||
// instance doing the sweep round
|
||||
case OperationTimedOut:
|
||||
lastSweepTime = time.Now()
|
||||
default:
|
||||
logger.LogIf(ctx, err)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
lastSweepTime = time.Now()
|
||||
logger.LogIf(ctx, err)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
}
|
||||
lastSweepTime = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
658
cmd/disk-cache-backend.go
Normal file
@@ -0,0 +1,658 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/djherbis/atime"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/disk"
|
||||
"github.com/minio/sio"
|
||||
"github.com/ncw/directio"
|
||||
)
|
||||
|
||||
const (
|
||||
// cache.json object metadata for cached objects.
|
||||
cacheMetaJSONFile = "cache.json"
|
||||
cacheDataFile = "part.1"
|
||||
cacheMetaVersion = "1.0.0"
|
||||
|
||||
cacheEnvDelimiter = ";"
|
||||
|
||||
// SSECacheEncrypted is the metadata key indicating that the object
|
||||
// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
|
||||
SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"
|
||||
)
|
||||
|
||||
// CacheChecksumInfoV1 - carries checksums of individual blocks on disk.
|
||||
type CacheChecksumInfoV1 struct {
|
||||
Algorithm string `json:"algorithm"`
|
||||
Blocksize int64 `json:"blocksize"`
|
||||
}
|
||||
|
||||
// Represents the cache metadata struct
|
||||
type cacheMeta struct {
|
||||
Version string `json:"version"`
|
||||
Stat statInfo `json:"stat"` // Stat of the current object `cache.json`.
|
||||
|
||||
// checksums of blocks on disk.
|
||||
Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"`
|
||||
// Metadata map for current object.
|
||||
Meta map[string]string `json:"meta,omitempty"`
|
||||
}
|
||||
|
||||
func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
|
||||
if len(m.Meta) == 0 {
|
||||
m.Meta = make(map[string]string)
|
||||
m.Stat.ModTime = timeSentinel
|
||||
}
|
||||
|
||||
o = ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: object,
|
||||
}
|
||||
|
||||
// We set file info only if it's valid.
|
||||
o.ModTime = m.Stat.ModTime
|
||||
o.Size = m.Stat.Size
|
||||
o.ETag = extractETag(m.Meta)
|
||||
o.ContentType = m.Meta["content-type"]
|
||||
o.ContentEncoding = m.Meta["content-encoding"]
|
||||
if storageClass, ok := m.Meta[amzStorageClass]; ok {
|
||||
o.StorageClass = storageClass
|
||||
} else {
|
||||
o.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
var (
|
||||
t time.Time
|
||||
e error
|
||||
)
|
||||
if exp, ok := m.Meta["expires"]; ok {
|
||||
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
|
||||
o.Expires = t.UTC()
|
||||
}
|
||||
}
|
||||
// etag/md5Sum has already been extracted. We need to
|
||||
// remove to avoid it from appearing as part of user-defined metadata
|
||||
o.UserDefined = cleanMetadata(m.Meta)
|
||||
return o
|
||||
}
|
||||
|
||||
// represents disk cache struct
|
||||
type diskCache struct {
|
||||
dir string // caching directory
|
||||
maxDiskUsagePct int // max usage in %
|
||||
expiry int // cache expiry in days
|
||||
// mark false if drive is offline
|
||||
online bool
|
||||
// mutex to protect updates to online variable
|
||||
onlineMutex *sync.RWMutex
|
||||
// purge() listens on this channel to start the cache-purge process
|
||||
purgeChan chan struct{}
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// Inits the disk cache dir if it is not initialized already.
|
||||
func newdiskCache(dir string, expiry int, maxDiskUsagePct int) (*diskCache, error) {
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize '%s' dir, %s", dir, err)
|
||||
}
|
||||
|
||||
if expiry == 0 {
|
||||
expiry = globalCacheExpiry
|
||||
}
|
||||
cache := diskCache{
|
||||
dir: dir,
|
||||
expiry: expiry,
|
||||
maxDiskUsagePct: maxDiskUsagePct,
|
||||
purgeChan: make(chan struct{}),
|
||||
online: true,
|
||||
onlineMutex: &sync.RWMutex{},
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := directio.AlignedBlock(int(cacheBlkSize))
|
||||
return &b
|
||||
},
|
||||
},
|
||||
}
|
||||
return &cache, nil
|
||||
}
|
||||
|
||||
// Returns if the disk usage is low.
|
||||
// Disk usage is low if usage is < 80% of cacheMaxDiskUsagePct
|
||||
// Ex. for a 100GB disk, if maxUsage is configured as 70% then cacheMaxDiskUsagePct is 70G
|
||||
// hence disk usage is low if the disk usage is less than 56G (because 80% of 70G is 56G)
|
||||
func (c *diskCache) diskUsageLow() bool {
|
||||
minUsage := c.maxDiskUsagePct * 80 / 100
|
||||
di, err := disk.GetInfo(c.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - di.Free) * 100 / di.Total
|
||||
return int(usedPercent) < minUsage
|
||||
}
|
||||
|
||||
// Return if the disk usage is high.
|
||||
// Disk usage is high if disk used is > cacheMaxDiskUsagePct
|
||||
func (c *diskCache) diskUsageHigh() bool {
|
||||
di, err := disk.GetInfo(c.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return true
|
||||
}
|
||||
usedPercent := (di.Total - di.Free) * 100 / di.Total
|
||||
return int(usedPercent) > c.maxDiskUsagePct
|
||||
}
|
||||
|
||||
// Returns whether size bytes can be allocated without exceeding
// the max disk space usable for caching
|
||||
func (c *diskCache) diskAvailable(size int64) bool {
|
||||
di, err := disk.GetInfo(c.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
|
||||
return int(usedPercent) < c.maxDiskUsagePct
|
||||
}
|
||||
|
||||
// Purge cache entries that were not accessed.
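// Each pass uses an age cutoff that starts at the configured expiry and is
// halved every iteration (e.g. 30 -> 15 -> 7 -> 3 days) until disk usage
// drops below the low-usage threshold or the cutoff falls under one day.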
|
||||
func (c *diskCache) purge() {
|
||||
ctx := context.Background()
|
||||
for {
|
||||
olderThan := c.expiry
|
||||
for !c.diskUsageLow() {
|
||||
// delete unaccessed objects older than expiry duration
|
||||
expiry := UTCNow().AddDate(0, 0, -1*olderThan)
|
||||
olderThan /= 2
|
||||
if olderThan < 1 {
|
||||
break
|
||||
}
|
||||
deletedCount := 0
|
||||
|
||||
objDirs, err := ioutil.ReadDir(c.dir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, obj := range objDirs {
|
||||
if obj.Name() == minioMetaBucket {
|
||||
continue
|
||||
}
|
||||
// stat entry to get atime
|
||||
var fi os.FileInfo
|
||||
fi, err := os.Stat(pathJoin(c.dir, obj.Name(), cacheDataFile))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
objInfo, err := c.statCache(ctx, pathJoin(c.dir, obj.Name()))
|
||||
if err != nil {
|
||||
// delete any partially filled cache entry left behind.
|
||||
removeAll(pathJoin(c.dir, obj.Name()))
|
||||
continue
|
||||
}
|
||||
cc := cacheControlOpts(objInfo)
|
||||
|
||||
if atime.Get(fi).Before(expiry) ||
|
||||
cc.isStale(objInfo.ModTime) {
|
||||
if err = removeAll(pathJoin(c.dir, obj.Name())); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
deletedCount++
|
||||
// break early if sufficient disk space reclaimed.
|
||||
if !c.diskUsageLow() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if deletedCount == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
lastRunTime := time.Now()
|
||||
for {
|
||||
<-c.purgeChan
|
||||
timeElapsed := time.Since(lastRunTime)
|
||||
if timeElapsed > time.Hour {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sets cache drive status
|
||||
func (c *diskCache) setOnline(status bool) {
|
||||
c.onlineMutex.Lock()
|
||||
c.online = status
|
||||
c.onlineMutex.Unlock()
|
||||
}
|
||||
|
||||
// returns true if cache drive is online
|
||||
func (c *diskCache) IsOnline() bool {
|
||||
c.onlineMutex.RLock()
|
||||
defer c.onlineMutex.RUnlock()
|
||||
return c.online
|
||||
}
|
||||
|
||||
// Stat returns ObjectInfo from disk cache
|
||||
func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
|
||||
cacheObjPath := getCacheSHADir(c.dir, bucket, object)
|
||||
oi, err = c.statCache(ctx, cacheObjPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
oi.Bucket = bucket
|
||||
oi.Name = object
|
||||
|
||||
if err = decryptCacheObjectETag(&oi); err != nil {
|
||||
return oi, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// statCache is a convenience function for purge() to get ObjectInfo for cached object
|
||||
func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (oi ObjectInfo, e error) {
|
||||
// Stat the file to get file size.
|
||||
metaPath := path.Join(cacheObjPath, cacheMetaJSONFile)
|
||||
f, err := os.Open(metaPath)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
meta := &cacheMeta{Version: cacheMetaVersion}
|
||||
if err := jsonLoad(f, meta); err != nil {
|
||||
return oi, err
|
||||
}
|
||||
fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile))
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
meta.Stat.ModTime = atime.Get(fi)
|
||||
return meta.ToObjectInfo("", ""), nil
|
||||
}
|
||||
|
||||
// saves object metadata to disk cache
|
||||
func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64) error {
|
||||
fileName := getCacheSHADir(c.dir, bucket, object)
|
||||
metaPath := pathJoin(fileName, cacheMetaJSONFile)
|
||||
|
||||
f, err := os.Create(metaPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
m := cacheMeta{Meta: meta, Version: cacheMetaVersion}
|
||||
m.Stat.Size = actualSize
|
||||
m.Stat.ModTime = UTCNow()
|
||||
m.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
|
||||
jsonData, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.Write(jsonData)
|
||||
return err
|
||||
}
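// For reference, the cache.json written above looks roughly like this; the
// inner field names of "stat" and the algorithm string are assumptions, only
// the top-level keys follow from the struct tags:
//
//	{
//	  "version": "1.0.0",
//	  "stat": { "size": 1048576, "modTime": "2019-06-01T00:00:00Z" },
//	  "checksum": { "algorithm": "highwayhash256S", "blocksize": 1048576 },
//	  "meta": { "content-type": "application/octet-stream", "etag": "..." }
//	}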
|
||||
|
||||
// Backend metadata could have changed through server-side copy - reset cache metadata if that is the case
|
||||
func (c *diskCache) updateMetadataIfChanged(ctx context.Context, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo) error {
|
||||
|
||||
bkMeta := make(map[string]string)
|
||||
cacheMeta := make(map[string]string)
|
||||
for k, v := range bkObjectInfo.UserDefined {
|
||||
if hasPrefix(k, ReservedMetadataPrefix) {
|
||||
// Do not need to send any internal metadata
|
||||
continue
|
||||
}
|
||||
bkMeta[http.CanonicalHeaderKey(k)] = v
|
||||
}
|
||||
for k, v := range cacheObjInfo.UserDefined {
|
||||
if hasPrefix(k, ReservedMetadataPrefix) {
|
||||
// Do not need to send any internal metadata
|
||||
continue
|
||||
}
|
||||
cacheMeta[http.CanonicalHeaderKey(k)] = v
|
||||
}
|
||||
if !reflect.DeepEqual(bkMeta, cacheMeta) ||
|
||||
bkObjectInfo.ETag != cacheObjInfo.ETag ||
|
||||
bkObjectInfo.ContentType != cacheObjInfo.ContentType ||
|
||||
bkObjectInfo.Expires != cacheObjInfo.Expires {
|
||||
return c.saveMetadata(ctx, bucket, object, getMetadata(bkObjectInfo), bkObjectInfo.Size)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getCacheSHADir(dir, bucket, object string) string {
|
||||
return path.Join(dir, getSHA256Hash([]byte(path.Join(bucket, object))))
|
||||
}
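// Example (illustrative): for bucket "photos" and object "2019/a.jpg" the
// cache path is <dir>/sha256("photos/2019/a.jpg"), giving every object a
// stable directory name independent of the original path length.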
|
||||
|
||||
// Cache data to disk with bitrot checksum added for each block of 1MB
|
||||
func (c *diskCache) bitrotWriteToCache(ctx context.Context, cachePath string, reader io.Reader, size uint64) (int64, error) {
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
bufSize := uint64(readSizeV1)
|
||||
if size > 0 && bufSize > size {
|
||||
bufSize = size
|
||||
}
|
||||
filePath := path.Join(cachePath, cacheDataFile)
|
||||
|
||||
if filePath == "" || reader == nil {
|
||||
return 0, errInvalidArgument
|
||||
}
|
||||
|
||||
if err := checkPathLength(filePath); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
return 0, osErrToFSFileErr(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var bytesWritten int64
|
||||
|
||||
h := HighwayHash256S.New()
|
||||
|
||||
bufp := c.pool.Get().(*[]byte)
|
||||
defer c.pool.Put(bufp)
|
||||
|
||||
var n, n2 int
|
||||
for {
|
||||
n, err = io.ReadFull(reader, *bufp)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return 0, err
|
||||
}
|
||||
eof := err == io.EOF || err == io.ErrUnexpectedEOF
|
||||
if n == 0 && size != 0 {
|
||||
// Reached EOF, nothing more to be done.
|
||||
break
|
||||
}
|
||||
h.Reset()
|
||||
if _, err = h.Write((*bufp)[:n]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hashBytes := h.Sum(nil)
|
||||
if _, err = f.Write(hashBytes); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if n2, err = f.Write((*bufp)[:n]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
bytesWritten += int64(n2)
|
||||
if eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
return bytesWritten, nil
|
||||
}
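// On-disk layout produced above, as a sketch - h.Size() is 32 bytes for
// HighwayHash-256:
//
//	[32-byte checksum][<= cacheBlkSize data] [32-byte checksum][...] ...
//
// Block k therefore starts at byte (cacheBlkSize + 32) * k, which matches
// the blockStartOffset arithmetic in bitrotReadFromCache below.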
|
||||
|
||||
func newCacheEncryptReader(content io.Reader, bucket, object string, metadata map[string]string) (r io.Reader, err error) {
|
||||
objectEncryptionKey, err := newCacheEncryptMetadata(bucket, object, metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
|
||||
if err != nil {
|
||||
return nil, crypto.ErrInvalidCustomerKey
|
||||
}
|
||||
return reader, nil
|
||||
}
|
||||
func newCacheEncryptMetadata(bucket, object string, metadata map[string]string) ([]byte, error) {
|
||||
var sealedKey crypto.SealedKey
|
||||
if globalCacheKMS == nil {
|
||||
return nil, errKMSNotConfigured
|
||||
}
|
||||
key, encKey, err := globalCacheKMS.GenerateKey(globalCacheKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
objectKey := crypto.GenerateKey(key, rand.Reader)
|
||||
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
|
||||
crypto.S3.CreateMetadata(metadata, globalCacheKMSKeyID, encKey, sealedKey)
|
||||
|
||||
if etag, ok := metadata["etag"]; ok {
|
||||
metadata["etag"] = hex.EncodeToString(objectKey.SealETag([]byte(etag)))
|
||||
}
|
||||
metadata[SSECacheEncrypted] = ""
|
||||
return objectKey[:], nil
|
||||
}
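// Key hierarchy sketch, inferred from the calls above: the cache KMS master
// key (globalCacheKMSKeyID) wraps a data key; that data key seals a random
// per-object key; CreateMetadata then persists the key ID, the wrapped data
// key and the sealed object key so that reads can unwrap in reverse order.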
|
||||
|
||||
// Caches the object to disk
|
||||
func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, opts ObjectOptions) error {
|
||||
if c.diskUsageHigh() {
|
||||
select {
|
||||
case c.purgeChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return errDiskFull
|
||||
}
|
||||
if !c.diskAvailable(size) {
|
||||
return errDiskFull
|
||||
}
|
||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
bufSize := int64(readSizeV1)
|
||||
if size > 0 && bufSize > size {
|
||||
bufSize = size
|
||||
}
|
||||
|
||||
var metadata = make(map[string]string)
|
||||
for k, v := range opts.UserDefined {
|
||||
metadata[k] = v
|
||||
}
|
||||
var reader = data
|
||||
var actualSize = uint64(size)
|
||||
var err error
|
||||
if globalCacheKMS != nil {
|
||||
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
actualSize, _ = sio.EncryptedSize(uint64(size))
|
||||
}
|
||||
n, err := c.bitrotWriteToCache(ctx, cachePath, reader, actualSize)
|
||||
if IsErr(err, baseErrs...) {
|
||||
c.setOnline(false)
|
||||
}
|
||||
if err != nil {
|
||||
removeAll(cachePath)
|
||||
return err
|
||||
}
|
||||
if actualSize != uint64(n) {
|
||||
removeAll(cachePath)
|
||||
return IncompleteBody{}
|
||||
}
|
||||
return c.saveMetadata(ctx, bucket, object, metadata, n)
|
||||
}
|
||||
|
||||
// checks streaming bitrot checksum of cached object before returning data
|
||||
func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, offset, length int64, writer io.Writer) error {
|
||||
h := HighwayHash256S.New()
|
||||
|
||||
checksumHash := make([]byte, h.Size())
|
||||
|
||||
startBlock := offset / cacheBlkSize
|
||||
endBlock := (offset + length) / cacheBlkSize
|
||||
|
||||
// get block start offset
|
||||
var blockStartOffset int64
|
||||
if startBlock > 0 {
|
||||
blockStartOffset = (cacheBlkSize + int64(h.Size())) * startBlock
|
||||
}
|
||||
|
||||
tillLength := (cacheBlkSize + int64(h.Size())) * (endBlock - startBlock + 1)
|
||||
|
||||
// Start offset cannot be negative.
|
||||
if offset < 0 {
|
||||
logger.LogIf(ctx, errUnexpected)
|
||||
return errUnexpected
|
||||
}
|
||||
|
||||
// Writer cannot be nil.
|
||||
if writer == nil {
|
||||
logger.LogIf(ctx, errUnexpected)
|
||||
return errUnexpected
|
||||
}
|
||||
var blockOffset, blockLength int64
|
||||
rc, err := readCacheFileStream(filePath, blockStartOffset, tillLength)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bufp := c.pool.Get().(*[]byte)
|
||||
defer c.pool.Put(bufp)
|
||||
|
||||
for block := startBlock; block <= endBlock; block++ {
|
||||
switch {
|
||||
case startBlock == endBlock:
|
||||
blockOffset = offset % cacheBlkSize
|
||||
blockLength = length
|
||||
case block == startBlock:
|
||||
blockOffset = offset % cacheBlkSize
|
||||
blockLength = cacheBlkSize - blockOffset
|
||||
case block == endBlock:
|
||||
blockOffset = 0
|
||||
blockLength = (offset + length) % cacheBlkSize
|
||||
default:
|
||||
blockOffset = 0
|
||||
blockLength = cacheBlkSize
|
||||
}
|
||||
if blockLength == 0 {
|
||||
break
|
||||
}
|
||||
if _, err := io.ReadFull(rc, checksumHash); err != nil {
|
||||
return err
|
||||
}
|
||||
h.Reset()
|
||||
n, err := io.ReadFull(rc, *bufp)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
eof := err == io.EOF || err == io.ErrUnexpectedEOF
|
||||
if n == 0 && length != 0 {
|
||||
// Reached EOF, nothing more to be done.
|
||||
break
|
||||
}
|
||||
|
||||
if _, e := h.Write((*bufp)[:n]); e != nil {
|
||||
return e
|
||||
}
|
||||
hashBytes := h.Sum(nil)
|
||||
|
||||
if !bytes.Equal(hashBytes, checksumHash) {
|
||||
err = fmt.Errorf("hashes do not match expected %s, got %s",
|
||||
hex.EncodeToString(checksumHash), hex.EncodeToString(hashBytes))
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(writer, bytes.NewReader((*bufp)[blockOffset:blockOffset+blockLength])); err != nil {
|
||||
if err != io.ErrClosedPipe {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
if eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns ObjectInfo and reader for object from disk cache
|
||||
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
var objInfo ObjectInfo
|
||||
cacheObjPath := getCacheSHADir(c.dir, bucket, object)
|
||||
|
||||
if objInfo, err = c.Stat(ctx, bucket, object); err != nil {
|
||||
return nil, toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
var nsUnlocker = func() {}
|
||||
// For a directory, we need to send a reader that returns no bytes.
|
||||
if hasSuffix(object, SlashSeparator) {
|
||||
// The lock taken above is released when
|
||||
// objReader.Close() is called by the caller.
|
||||
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
|
||||
}
|
||||
|
||||
fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
|
||||
if nErr != nil {
|
||||
return nil, nErr
|
||||
}
|
||||
|
||||
filePath := path.Join(cacheObjPath, cacheDataFile)
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
err := c.bitrotReadFromCache(ctx, filePath, off, length, pw)
|
||||
if err != nil {
|
||||
removeAll(cacheObjPath)
|
||||
}
|
||||
pw.CloseWithError(err)
|
||||
}()
|
||||
// Cleanup function to cause the goroutine above to exit, in
|
||||
// case of incomplete read.
|
||||
pipeCloser := func() { pr.Close() }
|
||||
return fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser)
|
||||
|
||||
}
|
||||
|
||||
// Deletes the cached object
|
||||
func (c *diskCache) Delete(ctx context.Context, bucket, object string) (err error) {
|
||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||
return removeAll(cachePath)
|
||||
|
||||
}
|
||||
|
||||
// convenience function to check if object is cached on this diskCache
|
||||
func (c *diskCache) Exists(ctx context.Context, bucket, object string) bool {
|
||||
if _, err := os.Stat(getCacheSHADir(c.dir, bucket, object)); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
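// Putting it together - an illustrative lifecycle of a diskCache, using only
// the functions defined in this file:
//
//	dc, _ := newdiskCache("/mnt/cache", 90, 80) // dir, expiry days, max usage %
//	go dc.purge()
//	_ = dc.Put(ctx, "bucket", "object", data, size, ObjectOptions{})
//	gr, _ := dc.Get(ctx, "bucket", "object", nil, http.Header{}, ObjectOptions{})
//	_ = dc.Delete(ctx, "bucket", "object")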
|
||||
@@ -109,7 +109,7 @@ func parseCacheExcludes(excludes []string) ([]string, error) {
|
||||
if len(e) == 0 {
|
||||
return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude path (%s) cannot be empty", e)
|
||||
}
|
||||
if hasPrefix(e, slashSeparator) {
|
||||
if hasPrefix(e, SlashSeparator) {
|
||||
return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude pattern (%s) cannot start with / as prefix", e)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,537 +0,0 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/disk"
|
||||
"github.com/minio/minio/pkg/lock"
|
||||
)
|
||||
|
||||
const (
|
||||
// cache.json object metadata for cached objects.
|
||||
cacheMetaJSONFile = "cache.json"
|
||||
|
||||
cacheEnvDelimiter = ";"
|
||||
)
|
||||
|
||||
// cacheFSObjects implements the cache backend operations.
|
||||
type cacheFSObjects struct {
|
||||
*FSObjects
|
||||
// caching drive path (from cache "drives" in config.json)
|
||||
dir string
|
||||
// expiry in days specified in config.json
|
||||
expiry int
|
||||
// max disk usage pct
|
||||
maxDiskUsagePct int
|
||||
// purge() listens on this channel to start the cache-purge process
|
||||
purgeChan chan struct{}
|
||||
// mark false if drive is offline
|
||||
online bool
|
||||
// mutex to protect updates to online variable
|
||||
onlineMutex *sync.RWMutex
|
||||
}
|
||||
|
||||
// Inits the cache directory if it is not initialized already.
|
||||
// Initializing implies creation of new FS Object layer.
|
||||
func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObjects, error) {
|
||||
// Assign a new UUID for FS minio mode. Each server instance
|
||||
// gets its own UUID for temporary file transaction.
|
||||
fsUUID := mustGetUUID()
|
||||
|
||||
// Initialize meta volume, if volume already exists ignores it.
|
||||
if err := initMetaVolumeFS(dir, fsUUID); err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
|
||||
}
|
||||
|
||||
trashPath := pathJoin(dir, minioMetaBucket, cacheTrashDir)
|
||||
if err := os.MkdirAll(trashPath, 0777); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if expiry == 0 {
|
||||
expiry = globalCacheExpiry
|
||||
}
|
||||
|
||||
// Initialize fs objects.
|
||||
fsObjects := &FSObjects{
|
||||
fsPath: dir,
|
||||
metaJSONFile: cacheMetaJSONFile,
|
||||
fsUUID: fsUUID,
|
||||
rwPool: &fsIOPool{
|
||||
readersMap: make(map[string]*lock.RLockedFile),
|
||||
},
|
||||
nsMutex: newNSLock(false),
|
||||
listPool: NewTreeWalkPool(globalLookupTimeout),
|
||||
appendFileMap: make(map[string]*fsAppendFile),
|
||||
}
|
||||
|
||||
go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
|
||||
|
||||
cacheFS := cacheFSObjects{
|
||||
FSObjects: fsObjects,
|
||||
dir: dir,
|
||||
expiry: expiry,
|
||||
maxDiskUsagePct: maxDiskUsagePct,
|
||||
purgeChan: make(chan struct{}),
|
||||
online: true,
|
||||
onlineMutex: &sync.RWMutex{},
|
||||
}
|
||||
return &cacheFS, nil
|
||||
}
|
||||
|
||||
// Returns if the disk usage is low.
|
||||
// Disk usage is low if usage is < 80% of cacheMaxDiskUsagePct
|
||||
// Ex. for a 100GB disk, if maxUsage is configured as 70% then cacheMaxDiskUsagePct is 70G
|
||||
// hence disk usage is low if the disk usage is less than 56G (because 80% of 70G is 56G)
|
||||
func (cfs *cacheFSObjects) diskUsageLow() bool {
|
||||
|
||||
minUsage := cfs.maxDiskUsagePct * 80 / 100
|
||||
di, err := disk.GetInfo(cfs.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - di.Free) * 100 / di.Total
|
||||
return int(usedPercent) < minUsage
|
||||
}
|
||||
|
||||
// Return if the disk usage is high.
|
||||
// Disk usage is high if disk used is > cacheMaxDiskUsagePct
|
||||
func (cfs *cacheFSObjects) diskUsageHigh() bool {
|
||||
di, err := disk.GetInfo(cfs.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return true
|
||||
}
|
||||
usedPercent := (di.Total - di.Free) * 100 / di.Total
|
||||
return int(usedPercent) > cfs.maxDiskUsagePct
|
||||
}
|
||||
|
||||
// Returns if size space can be allocated without exceeding
|
||||
// max disk usable for caching
|
||||
func (cfs *cacheFSObjects) diskAvailable(size int64) bool {
|
||||
di, err := disk.GetInfo(cfs.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
|
||||
return int(usedPercent) < cfs.maxDiskUsagePct
|
||||
}
|
||||
|
||||
// purges all content marked trash from the cache.
|
||||
func (cfs *cacheFSObjects) purgeTrash() {
|
||||
ticker := time.NewTicker(time.Minute * cacheCleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-GlobalServiceDoneCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
|
||||
entries, err := readDir(trashPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, entry := range entries {
|
||||
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
|
||||
fi, err := fsStatVolume(ctx, pathJoin(trashPath, entry))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
dir := path.Join(trashPath, fi.Name())
|
||||
|
||||
// Delete all expired cache content.
|
||||
fsRemoveAll(ctx, dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Purge cache entries that were not accessed.
|
||||
func (cfs *cacheFSObjects) purge() {
|
||||
delimiter := slashSeparator
|
||||
maxKeys := 1000
|
||||
ctx := context.Background()
|
||||
for {
|
||||
olderThan := cfs.expiry
|
||||
for !cfs.diskUsageLow() {
|
||||
// delete unaccessed objects older than expiry duration
|
||||
expiry := UTCNow().AddDate(0, 0, -1*olderThan)
|
||||
olderThan /= 2
|
||||
if olderThan < 1 {
|
||||
break
|
||||
}
|
||||
deletedCount := 0
|
||||
buckets, err := cfs.ListBuckets(ctx)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
// Reset cache online status if drive was offline earlier.
|
||||
if !cfs.IsOnline() {
|
||||
cfs.setOnline(true)
|
||||
}
|
||||
for _, bucket := range buckets {
|
||||
var continuationToken string
|
||||
var marker string
|
||||
for {
|
||||
objects, err := cfs.ListObjects(ctx, bucket.Name, marker, continuationToken, delimiter, maxKeys)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if !objects.IsTruncated {
|
||||
break
|
||||
}
|
||||
marker = objects.NextMarker
|
||||
for _, object := range objects.Objects {
|
||||
// purge objects that qualify because of cache-control directives or
|
||||
// past cache expiry duration.
|
||||
if !filterFromCache(object.UserDefined) ||
|
||||
!isStaleCache(object) ||
|
||||
object.AccTime.After(expiry) {
|
||||
continue
|
||||
}
|
||||
if err = cfs.DeleteObject(ctx, bucket.Name, object.Name); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
if deletedCount == 0 {
|
||||
// to avoid a busy loop
|
||||
time.Sleep(time.Minute * 30)
|
||||
}
|
||||
}
|
||||
<-cfs.purgeChan
|
||||
}
|
||||
}
|
||||
|
||||
// sets cache drive status
|
||||
func (cfs *cacheFSObjects) setOnline(status bool) {
|
||||
cfs.onlineMutex.Lock()
|
||||
cfs.online = status
|
||||
cfs.onlineMutex.Unlock()
|
||||
}
|
||||
|
||||
// returns true if cache drive is online
|
||||
func (cfs *cacheFSObjects) IsOnline() bool {
|
||||
cfs.onlineMutex.RLock()
|
||||
defer cfs.onlineMutex.RUnlock()
|
||||
return cfs.online
|
||||
}
|
||||
|
||||
// Caches the object to disk
|
||||
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) error {
|
||||
if cfs.diskUsageHigh() {
|
||||
select {
|
||||
case cfs.purgeChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return errDiskFull
|
||||
}
|
||||
if !cfs.diskAvailable(data.Size()) {
|
||||
return errDiskFull
|
||||
}
|
||||
if _, err := cfs.GetBucketInfo(ctx, bucket); err != nil {
|
||||
pErr := cfs.MakeBucketWithLocation(ctx, bucket, "")
|
||||
if pErr != nil {
|
||||
return pErr
|
||||
}
|
||||
}
|
||||
_, err := cfs.PutObject(ctx, bucket, object, data, opts)
|
||||
// if err is due to disk being offline, mark cache drive as offline
|
||||
if IsErr(err, baseErrs...) {
|
||||
cfs.setOnline(false)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Returns the handle for the cached object
|
||||
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
|
||||
return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||
}
|
||||
|
||||
// Deletes the cached object
|
||||
func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (err error) {
|
||||
return cfs.DeleteObject(ctx, bucket, object)
|
||||
}
|
||||
|
||||
// convenience function to check if object is cached on this cacheFSObjects
|
||||
func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
|
||||
_, err := cfs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Identical to fs PutObject operation except that it uses ETag in metadata
|
||||
// headers.
|
||||
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
|
||||
data := r.Reader
|
||||
fs := cfs.FSObjects
|
||||
// Lock the object.
|
||||
objectLock := fs.nsMutex.NewNSLock(bucket, object)
|
||||
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
|
||||
return objInfo, err
|
||||
}
|
||||
defer objectLock.Unlock()
|
||||
|
||||
// No metadata is set, allocate a new one.
|
||||
meta := make(map[string]string)
|
||||
for k, v := range opts.UserDefined {
|
||||
meta[k] = v
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
// Validate if bucket name is valid and exists.
|
||||
if _, err = fs.statBucketDir(ctx, bucket); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket)
|
||||
}
|
||||
|
||||
fsMeta := newFSMetaV1()
|
||||
fsMeta.Meta = meta
|
||||
|
||||
// This is a special case with size as '0' and object ends
|
||||
// with a slash separator, we treat it like a valid operation
|
||||
// and return success.
|
||||
if isObjectDir(object, data.Size()) {
|
||||
// Check if an object is present as one of the parent dir.
|
||||
if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
|
||||
return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
|
||||
}
|
||||
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
var fi os.FileInfo
|
||||
if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
return fsMeta.ToObjectInfo(bucket, object, fi), nil
|
||||
}
|
||||
|
||||
if err = checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
|
||||
// Check if an object is present as one of the parent dir.
|
||||
if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
|
||||
return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
|
||||
}
|
||||
|
||||
// Validate input data size and it can never be less than zero.
|
||||
if data.Size() < -1 {
|
||||
logger.LogIf(ctx, errInvalidArgument)
|
||||
return ObjectInfo{}, errInvalidArgument
|
||||
}
|
||||
|
||||
var wlk *lock.LockedFile
|
||||
if bucket != minioMetaBucket {
|
||||
bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
|
||||
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
|
||||
|
||||
wlk, err = fs.rwPool.Create(fsMetaPath)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
// This close will allow for locks to be synchronized on `fs.json`.
|
||||
defer wlk.Close()
|
||||
defer func() {
|
||||
// Remove meta file when PutObject encounters any error
|
||||
if retErr != nil {
|
||||
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
|
||||
fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Uploaded object will first be written to the temporary location which will eventually
|
||||
// be renamed to the actual location. It is first written to the temporary location
|
||||
// so that cleaning it up will be easy if the server goes down.
|
||||
tempObj := mustGetUUID()
|
||||
|
||||
// Allocate a buffer to Read() from request body
|
||||
bufSize := int64(readSizeV1)
|
||||
if size := data.Size(); size > 0 && bufSize > size {
|
||||
bufSize = size
|
||||
}
|
||||
|
||||
buf := make([]byte, int(bufSize))
|
||||
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
|
||||
bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, buf, data.Size())
|
||||
if err != nil {
|
||||
fsRemoveFile(ctx, fsTmpObjPath)
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
if fsMeta.Meta["etag"] == "" {
|
||||
fsMeta.Meta["etag"] = r.MD5CurrentHexString()
|
||||
}
|
||||
// Should return IncompleteBody{} error when reader has fewer
|
||||
// bytes than specified in request header.
|
||||
if bytesWritten < data.Size() {
|
||||
fsRemoveFile(ctx, fsTmpObjPath)
|
||||
return ObjectInfo{}, IncompleteBody{}
|
||||
}
|
||||
|
||||
// Delete the temporary object in the case of a
|
||||
// failure. If PutObject succeeds, then there would be
|
||||
// nothing to delete.
|
||||
defer fsRemoveFile(ctx, fsTmpObjPath)
|
||||
|
||||
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
|
||||
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
|
||||
if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
if bucket != minioMetaBucket {
|
||||
// Write FS metadata after a successful namespace operation.
|
||||
if _, err = fsMeta.WriteTo(wlk); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
}
|
||||
|
||||
// Stat the file to fetch timestamp, size.
|
||||
fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
|
||||
if err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
// Success.
|
||||
return fsMeta.ToObjectInfo(bucket, object, fi), nil
|
||||
}
|
||||
|
||||
// Implements the S3-compatible initiate multipart API. The operation here is
// identical to the fs backend implementation, except that the cache FS reuses
// the uploadID generated on the backend.
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, uploadID string, opts ObjectOptions) (string, error) {
	if cfs.diskUsageHigh() {
		// Non-blocking send: wake the purge routine if it is not already busy.
		select {
		case cfs.purgeChan <- struct{}{}:
		default:
		}
		return "", errDiskFull
	}

	if _, err := cfs.GetBucketInfo(ctx, bucket); err != nil {
		pErr := cfs.MakeBucketWithLocation(ctx, bucket, "")
		if pErr != nil {
			return "", pErr
		}
	}
	fs := cfs.FSObjects
	if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
		return "", toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return "", toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	err := mkdirAll(uploadIDDir, 0755)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	// Initialize fs.json values.
	fsMeta := newFSMetaV1()
	fsMeta.Meta = opts.UserDefined

	fsMetaBytes, err := json.Marshal(fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}
	return uploadID, nil
}

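For orientation, a hypothetical caller of the method above might look like the sketch below: the backend mints the uploadID, and the cache layer is handed the same ID so parts land in a matching upload directory. The backend parameter, the helper name, and the treat-cache-errors-as-non-fatal policy are assumptions for illustration, not code from this changeset.

func exampleInitUploadWithCache(ctx context.Context, backend ObjectLayer, cfs *cacheFSObjects, bucket, object string, opts ObjectOptions) (string, error) {
	// The backend generates the canonical uploadID.
	uploadID, err := backend.NewMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		return "", err
	}
	// The cache reuses the backend uploadID, per the comment above.
	if _, cerr := cfs.NewMultipartUpload(ctx, bucket, object, uploadID, opts); cerr != nil {
		// Assumed policy: a cache failure should not fail the upload itself.
		logger.LogIf(ctx, cerr)
	}
	return uploadID, nil
}
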
// moveBucketToTrash clears this cacheFSObjects of the bucket's contents by
// moving them, along with the bucket metadata, into the trash folder.
func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string) (err error) {
	fs := cfs.FSObjects
	bucketLock := fs.nsMutex.NewNSLock(bucket, "")
	if err = bucketLock.GetLock(globalObjectTimeout); err != nil {
		return err
	}
	defer bucketLock.Unlock()
	bucketDir, err := fs.getBucketDir(ctx, bucket)
	if err != nil {
		return toObjectErr(err, bucket)
	}
	trashPath := pathJoin(cfs.fsPath, minioMetaBucket, cacheTrashDir)
	expiredDir := path.Join(trashPath, bucket)
	// Attempt to move regular bucket to expired directory.
	if err = fsRenameDir(bucketDir, expiredDir); err != nil {
		logger.LogIf(ctx, err)
		return toObjectErr(err, bucket)
	}
	// Cleanup all the bucket metadata.
	ominioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
	nminioMetadataBucketDir := pathJoin(trashPath, MustGetUUID())
	logger.LogIf(ctx, fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir))
	return nil
}

// fsRenameDir renames dirPath to newPath, handling long paths on Windows
// automatically; it maps a missing source to errVolumeNotFound and a
// non-empty destination to errVolumeNotEmpty.
func fsRenameDir(dirPath, newPath string) (err error) {
	if dirPath == "" || newPath == "" {
		return errInvalidArgument
	}

	if err = checkPathLength(dirPath); err != nil {
		return err
	}
	if err = checkPathLength(newPath); err != nil {
		return err
	}
	if err = os.Rename(dirPath, newPath); err != nil {
		if os.IsNotExist(err) {
			return errVolumeNotFound
		} else if isSysErrNotEmpty(err) {
			return errVolumeNotEmpty
		}
		return err
	}
	return nil
}

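As a usage sketch (the paths and helper are hypothetical, not from this changeset), the error mapping above lets a caller distinguish a missing source from a destination that already holds data:

func exampleMoveToTrash() error {
	err := fsRenameDir("/cache/testbucket", "/cache/trash/testbucket")
	switch err {
	case errVolumeNotFound:
		// Source directory does not exist; nothing to move.
		return nil
	case errVolumeNotEmpty:
		// Destination already exists and is not empty.
		return err
	default:
		return err
	}
}
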
220  cmd/disk-cache-utils.go  Normal file
@@ -0,0 +1,220 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/hex"
	"io"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio/cmd/crypto"
)

type cacheControl struct {
	expiry   time.Time
	maxAge   int
	sMaxAge  int
	minFresh int
	maxStale int
}

func (c cacheControl) isEmpty() bool {
	return c == cacheControl{}
}

func (c cacheControl) isStale(modTime time.Time) bool {
	if c.isEmpty() {
		return false
	}
	now := time.Now()

	if c.sMaxAge > 0 && c.sMaxAge < int(now.Sub(modTime).Seconds()) {
		return true
	}
	if c.maxAge > 0 && c.maxAge < int(now.Sub(modTime).Seconds()) {
		return true
	}

	// max-stale is expressed in seconds, so scale it before adding it to the
	// current time; passing it to time.Duration unscaled would count nanoseconds.
	if !c.expiry.Equal(time.Time{}) && c.expiry.Before(now.Add(time.Duration(c.maxStale)*time.Second)) {
		return true
	}

	if c.minFresh > 0 && c.minFresh <= int(now.Sub(modTime).Seconds()) {
		return true
	}

	return false
}

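To make the arithmetic concrete, here is a minimal illustration (the helper is hypothetical, not part of this file): an object cached two hours ago with Cache-Control: max-age=3600 is stale, because 7200 elapsed seconds exceed the 3600-second budget.

func exampleStaleness() bool {
	cc := cacheControl{maxAge: 3600}
	// modTime two hours in the past: now.Sub(modTime) is 7200s > 3600s.
	return cc.isStale(time.Now().Add(-2 * time.Hour)) // true
}
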
// cacheControlOpts returns a cacheControl struct populated from the
// cache-control directives and the Expires value in the object's user metadata.
func cacheControlOpts(o ObjectInfo) (c cacheControl) {
	m := o.UserDefined
	if o.Expires != timeSentinel {
		c.expiry = o.Expires
	}

	var headerVal string
	for k, v := range m {
		if strings.ToLower(k) == "cache-control" {
			headerVal = v
		}
	}
	if headerVal == "" {
		return
	}
	headerVal = strings.ToLower(headerVal)
	headerVal = strings.TrimSpace(headerVal)

	vals := strings.Split(headerVal, ",")
	for _, val := range vals {
		val = strings.TrimSpace(val)
		p := strings.Split(val, "=")

		if len(p) != 2 {
			continue
		}
		if p[0] == "max-age" ||
			p[0] == "s-maxage" ||
			p[0] == "min-fresh" ||
			p[0] == "max-stale" {
			i, err := strconv.Atoi(p[1])
			if err != nil {
				// A malformed numeric directive invalidates the whole header.
				return cacheControl{}
			}
			if p[0] == "max-age" {
				c.maxAge = i
			}
			if p[0] == "s-maxage" {
				c.sMaxAge = i
			}
			if p[0] == "min-fresh" {
				c.minFresh = i
			}
			if p[0] == "max-stale" {
				c.maxStale = i
			}
		}
	}
	return c
}

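A hypothetical caller tying the two helpers together (the function name and header values are illustrative only):

func exampleCacheControlUsage(modTime time.Time) bool {
	oi := ObjectInfo{
		UserDefined: map[string]string{"Cache-Control": "max-age=600, s-maxage=120"},
		Expires:     timeSentinel, // sentinel meaning "no Expires header"
	}
	cc := cacheControlOpts(oi) // cacheControl{maxAge: 600, sMaxAge: 120}
	return cc.isStale(modTime)
}
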
// backendDownError returns true if err is due to a backend failure or a faulty disk in server mode.
func backendDownError(err error) bool {
	_, backendDown := err.(BackendDown)
	return backendDown || IsErr(err, baseErrs...)
}

// IsCacheable returns true if the object may be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
	return !crypto.IsEncrypted(o.UserDefined) || globalCacheKMS != nil
}

// readCacheFileStream reads the file cached on disk, from offset up to length bytes.
func readCacheFileStream(filePath string, offset, length int64) (io.ReadCloser, error) {
	if filePath == "" || offset < 0 {
		return nil, errInvalidArgument
	}
	if err := checkPathLength(filePath); err != nil {
		return nil, err
	}

	fr, err := os.Open(filePath)
	if err != nil {
		return nil, osErrToFSFileErr(err)
	}
	// Stat to get the size of the file at path.
	st, err := fr.Stat()
	if err != nil {
		err = osErrToFSFileErr(err)
		return nil, err
	}

	// Bump the access time while preserving the modification time.
	if err = os.Chtimes(filePath, time.Now(), st.ModTime()); err != nil {
		return nil, err
	}

	// Verify that this is a regular file, since Seek on anything else is undefined.
	if !st.Mode().IsRegular() {
		return nil, errIsNotRegular
	}

	// Seek to the requested offset.
	if offset > 0 {
		_, err = fr.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}
	return struct {
		io.Reader
		io.Closer
	}{Reader: io.LimitReader(fr, length), Closer: fr}, nil
}

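A sketch of how readCacheFileStream might be consumed (the caller and the byte range are assumptions): stream 100 bytes starting at offset 100, then close the reader.

func exampleReadRange(w io.Writer, cachedPath string) error {
	rc, err := readCacheFileStream(cachedPath, 100, 100)
	if err != nil {
		return err
	}
	defer rc.Close()
	// Copies at most 100 bytes thanks to the io.LimitReader above.
	_, err = io.Copy(w, rc)
	return err
}
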
func isCacheEncrypted(meta map[string]string) bool {
	_, ok := meta[SSECacheEncrypted]
	return ok
}

// decryptCacheObjectETag tries to decrypt the ETag saved in encrypted format using the cache KMS
func decryptCacheObjectETag(info *ObjectInfo) error {
	// Directories are never encrypted.
	if info.IsDir {
		return nil
	}
	encrypted := crypto.S3.IsEncrypted(info.UserDefined) && isCacheEncrypted(info.UserDefined)

	switch {
	case encrypted:
		if globalCacheKMS == nil {
			return errKMSNotConfigured
		}
		keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(info.UserDefined)
		if err != nil {
			return err
		}
		extKey, err := globalCacheKMS.UnsealKey(keyID, kmsKey, crypto.Context{info.Bucket: path.Join(info.Bucket, info.Name)})
		if err != nil {
			return err
		}
		var objectKey crypto.ObjectKey
		if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), info.Bucket, info.Name); err != nil {
			return err
		}
		etagStr := tryDecryptETag(objectKey[:], info.ETag, false)
		// backend ETag was hex encoded before encrypting, so hex decode to get actual ETag
		etag, err := hex.DecodeString(etagStr)
		if err != nil {
			return err
		}
		info.ETag = string(etag)
		return nil
	}

	return nil
}

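The hex round-trip in the final step, shown in isolation (a hypothetical helper, not part of this file): the decrypted ETag is still hex text and must be decoded back to the raw bytes the backend originally stored.

func exampleETagHexDecode(decryptedETag string) (string, error) {
	raw, err := hex.DecodeString(decryptedETag)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
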
60  cmd/disk-cache-utils_test.go  Normal file
@@ -0,0 +1,60 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"net/http"
	"reflect"
	"testing"
	"time"
)

func TestGetCacheControlOpts(t *testing.T) {
	expiry, _ := time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")

	testCases := []struct {
		cacheControlHeaderVal string
		expiryHeaderVal       time.Time
		expectedCacheControl  cacheControl
		expectedErr           bool
	}{
		{"", timeSentinel, cacheControl{}, false},
		{"max-age=2592000, public", timeSentinel, cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"max-age=2592000, no-store", timeSentinel, cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"must-revalidate, max-age=600", timeSentinel, cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"s-maxAge=2500, max-age=600", timeSentinel, cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, false},
		{"s-maxAge=2500, max-age=600", expiry, cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC)}, false},
		{"s-maxAge=2500, max-age=600s", timeSentinel, cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, true},
	}
	var m map[string]string

	for i, testCase := range testCases {
		m = make(map[string]string)
		m["cache-control"] = testCase.cacheControlHeaderVal
		if testCase.expiryHeaderVal != timeSentinel {
			m["expires"] = testCase.expiryHeaderVal.String()
		}
		c := cacheControlOpts(ObjectInfo{UserDefined: m, Expires: testCase.expiryHeaderVal})
		if testCase.expectedErr && (c != cacheControl{}) {
			t.Errorf("expected err for case %d", i)
		}
		if !testCase.expectedErr && !reflect.DeepEqual(c, testCase.expectedCacheControl) {
			t.Errorf("expected %v got %v for case %d", testCase.expectedCacheControl, c, i)
		}
	}
}

1241  cmd/disk-cache.go
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2018,2019 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,34 +19,52 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"reflect"
+	"io"
+	"net/http"
 	"testing"
 	"time"
 
 	"github.com/minio/minio/pkg/hash"
 )
 
-// Initialize cache FS objects.
-func initCacheFSObjects(disk string, cacheMaxUse int) (*cacheFSObjects, error) {
-	return newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
+// Initialize cache objects.
+func initCacheObjects(disk string, cacheMaxUse int) (*diskCache, error) {
+	return newdiskCache(disk, globalCacheExpiry, cacheMaxUse)
 }
 
 // inits diskCache struct for nDisks
-func initDiskCaches(drives []string, cacheMaxUse int, t *testing.T) (*diskCache, error) {
-	var cfs []*cacheFSObjects
+func initDiskCaches(drives []string, cacheMaxUse int, t *testing.T) ([]*diskCache, error) {
+	var cb []*diskCache
 	for _, d := range drives {
-		obj, err := initCacheFSObjects(d, cacheMaxUse)
+		obj, err := initCacheObjects(d, cacheMaxUse)
 		if err != nil {
 			return nil, err
 		}
-		cfs = append(cfs, obj)
+		cb = append(cb, obj)
 	}
-	return &diskCache{cfs: cfs}, nil
+	return cb, nil
 }
 
+// Tests ToObjectInfo function.
+func TestCacheMetadataObjInfo(t *testing.T) {
+	m := cacheMeta{Meta: nil}
+	objInfo := m.ToObjectInfo("testbucket", "testobject")
+	if objInfo.Size != 0 {
+		t.Fatal("Unexpected object info value for Size", objInfo.Size)
+	}
+	if objInfo.ModTime != timeSentinel {
+		t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
+	}
+	if objInfo.IsDir {
+		t.Fatal("Unexpected object info value for IsDir", objInfo.IsDir)
+	}
+	if !objInfo.Expires.IsZero() {
+		t.Fatal("Unexpected object info value for Expires ", objInfo.Expires)
+	}
+}
+
 // test whether a drive being offline causes
-// getCacheFS to fetch next online drive
-func TestGetCacheFS(t *testing.T) {
+// getCachedLoc to fetch next online drive
+func TestGetCachedLoc(t *testing.T) {
 	for n := 1; n < 10; n++ {
 		fsDirs, err := getRandomDisks(n)
 		if err != nil {
@@ -56,14 +74,15 @@ func TestGetCacheFS(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
+		c := cacheObjects{cache: d}
 		bucketName := "testbucket"
 		objectName := "testobject"
 		ctx := context.Background()
 		// find cache drive where object would be hashed
-		index := d.hashIndex(bucketName, objectName)
+		index := c.hashIndex(bucketName, objectName)
 		// turn off drive by setting online status to false
-		d.cfs[index].online = false
-		cfs, err := d.getCacheFS(ctx, bucketName, objectName)
+		c.cache[index].online = false
+		cfs, err := c.getCacheLoc(ctx, bucketName, objectName)
 		if n == 1 && err == errDiskNotFound {
 			continue
 		}
@@ -71,7 +90,7 @@ func TestGetCacheFS(t *testing.T) {
 			t.Fatal(err)
 		}
 		i := -1
-		for j, f := range d.cfs {
+		for j, f := range c.cache {
 			if f == cfs {
 				i = j
 				break
@@ -84,8 +103,8 @@ func TestGetCacheFS(t *testing.T) {
 }
 
 // test whether a drive being offline causes
-// getCacheFS to fetch next online drive
-func TestGetCacheFSMaxUse(t *testing.T) {
+// getCachedLoc to fetch next online drive
+func TestGetCacheMaxUse(t *testing.T) {
 	for n := 1; n < 10; n++ {
 		fsDirs, err := getRandomDisks(n)
 		if err != nil {
@@ -95,14 +114,16 @@ func TestGetCacheFSMaxUse(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
+		c := cacheObjects{cache: d}
+
 		bucketName := "testbucket"
 		objectName := "testobject"
 		ctx := context.Background()
 		// find cache drive where object would be hashed
-		index := d.hashIndex(bucketName, objectName)
+		index := c.hashIndex(bucketName, objectName)
 		// turn off drive by setting online status to false
-		d.cfs[index].online = false
-		cfs, err := d.getCacheFS(ctx, bucketName, objectName)
+		c.cache[index].online = false
+		cb, err := c.getCacheLoc(ctx, bucketName, objectName)
 		if n == 1 && err == errDiskNotFound {
 			continue
 		}
@@ -110,8 +131,8 @@ func TestGetCacheFSMaxUse(t *testing.T) {
 			t.Fatal(err)
 		}
 		i := -1
-		for j, f := range d.cfs {
-			if f == cfs {
+		for j, f := range d {
+			if f == cb {
 				i = j
 				break
 			}
@@ -124,17 +145,10 @@ func TestGetCacheFSMaxUse(t *testing.T) {
 
 // test wildcard patterns for excluding entries from cache
 func TestCacheExclusion(t *testing.T) {
-	fsDirs, err := getRandomDisks(1)
-	if err != nil {
-		t.Fatal(err)
+	cobjects := &cacheObjects{
+		cache: nil,
 	}
-	cconfig := CacheConfig{Expiry: 30, Drives: fsDirs}
-	cobjects, err := newServerCacheObjects(cconfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-	cobj := cobjects.(*cacheObjects)
-	GlobalServiceDoneCh <- struct{}{}
 
 	testCases := []struct {
 		bucketName string
 		objectName string
@@ -155,8 +169,8 @@ func TestCacheExclusion(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		cobj.exclude = []string{testCase.excludePattern}
-		if cobj.isCacheExclude(testCase.bucketName, testCase.objectName) != testCase.expectedResult {
+		cobjects.exclude = []string{testCase.excludePattern}
+		if cobjects.isCacheExclude(testCase.bucketName, testCase.objectName) != testCase.expectedResult {
 			t.Fatal("Cache exclusion test failed for case ", i)
 		}
 	}
@@ -172,7 +186,9 @@ func TestDiskCache(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	cache := d.cfs[0]
+	c := cacheObjects{cache: d}
+
+	cache := c.cache[0]
 	ctx := context.Background()
 	bucketName := "testbucket"
 	objectName := "testobject"
@@ -198,14 +214,17 @@ func TestDiskCache(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
+	err = cache.Put(ctx, bucketName, objectName, hashReader, hashReader.Size(), ObjectOptions{UserDefined: httpMeta})
 	if err != nil {
 		t.Fatal(err)
 	}
-	cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
+	cReader, err := cache.Get(ctx, bucketName, objectName, nil, http.Header{
+		"Content-Type": []string{"application/json"},
+	}, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
+	cachedObjInfo := cReader.ObjInfo
 	if !cache.Exists(ctx, bucketName, objectName) {
 		t.Fatal("Expected object to exist on cache")
 	}
@@ -219,17 +238,16 @@ func TestDiskCache(t *testing.T) {
 		t.Fatal("Cached content-type does not match")
 	}
 	writer := bytes.NewBuffer(nil)
-	err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
+	_, err = io.Copy(writer, cReader)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if ccontent := writer.Bytes(); !bytes.Equal([]byte(content), ccontent) {
 		t.Errorf("wrong cached file content")
 	}
-	err = cache.Delete(ctx, bucketName, objectName)
-	if err != nil {
-		t.Errorf("object missing from cache")
-	}
+	cReader.Close()
+
+	cache.Delete(ctx, bucketName, objectName)
 	online := cache.IsOnline()
 	if !online {
 		t.Errorf("expected cache drive to be online")
@@ -246,7 +264,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	cache := d.cfs[0]
+	cache := d[0]
 	ctx := context.Background()
 	bucketName := "testbucket"
 	objectName := "testobject"
@@ -274,19 +292,20 @@ func TestDiskCacheMaxUse(t *testing.T) {
 		t.Fatal(err)
 	}
 	if !cache.diskAvailable(int64(size)) {
-		err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
+		err = cache.Put(ctx, bucketName, objectName, hashReader, hashReader.Size(), ObjectOptions{UserDefined: httpMeta})
 		if err != errDiskFull {
 			t.Fatal("Cache max-use limit violated.")
 		}
 	} else {
-		err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
+		err = cache.Put(ctx, bucketName, objectName, hashReader, hashReader.Size(), ObjectOptions{UserDefined: httpMeta})
 		if err != nil {
 			t.Fatal(err)
 		}
-		cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
+		cReader, err := cache.Get(ctx, bucketName, objectName, nil, nil, opts)
 		if err != nil {
 			t.Fatal(err)
 		}
+		cachedObjInfo := cReader.ObjInfo
 		if !cache.Exists(ctx, bucketName, objectName) {
 			t.Fatal("Expected object to exist on cache")
 		}
@@ -300,92 +319,19 @@ func TestDiskCacheMaxUse(t *testing.T) {
 			t.Fatal("Cached content-type does not match")
 		}
 		writer := bytes.NewBuffer(nil)
-		err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
+		_, err = io.Copy(writer, cReader)
 		if err != nil {
 			t.Fatal(err)
 		}
 		if ccontent := writer.Bytes(); !bytes.Equal([]byte(content), ccontent) {
 			t.Errorf("wrong cached file content")
 		}
-		err = cache.Delete(ctx, bucketName, objectName)
-		if err != nil {
-			t.Errorf("object missing from cache")
-		}
+		cReader.Close()
+
+		cache.Delete(ctx, bucketName, objectName)
 		online := cache.IsOnline()
 		if !online {
 			t.Errorf("expected cache drive to be online")
 		}
 	}
 }
-
-func TestIsCacheExcludeDirective(t *testing.T) {
-	testCases := []struct {
-		cacheControlOpt string
-		expectedResult  bool
-	}{
-		{"no-cache", true},
-		{"no-store", true},
-		{"must-revalidate", true},
-		{"no-transform", false},
-		{"max-age", false},
-	}
-
-	for i, testCase := range testCases {
-		if isCacheExcludeDirective(testCase.cacheControlOpt) != testCase.expectedResult {
-			t.Errorf("Cache exclude directive test failed for case %d", i)
-		}
-	}
-}
-
-func TestGetCacheControlOpts(t *testing.T) {
-	testCases := []struct {
-		cacheControlHeaderVal string
-		expiryHeaderVal       string
-		expectedCacheControl  cacheControl
-		expectedErr           bool
-	}{
-		{"", "", cacheControl{}, false},
-		{"max-age=2592000, public", "", cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}, exclude: false}, false},
-		{"max-age=2592000, no-store", "", cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}, exclude: true}, false},
-		{"must-revalidate, max-age=600", "", cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}, exclude: true}, false},
-		{"s-maxAge=2500, max-age=600", "", cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}, exclude: false}, false},
-		{"s-maxAge=2500, max-age=600", "Wed, 21 Oct 2015 07:28:00 GMT", cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC), exclude: false}, false},
-		{"s-maxAge=2500, max-age=600s", "", cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}, exclude: false}, true},
-	}
-	var m map[string]string
-
-	for i, testCase := range testCases {
-		m = make(map[string]string)
-		m["cache-control"] = testCase.cacheControlHeaderVal
-		if testCase.expiryHeaderVal != "" {
-			m["expires"] = testCase.expiryHeaderVal
-		}
-		c, err := getCacheControlOpts(m)
-		if testCase.expectedErr && err == nil {
-			t.Errorf("expected err for case %d", i)
-		}
-		if !testCase.expectedErr && !reflect.DeepEqual(c, testCase.expectedCacheControl) {
-			t.Errorf("expected %v got %v for case %d", testCase.expectedCacheControl, c, i)
-		}
-	}
-}
-
-func TestFilterFromCache(t *testing.T) {
-	testCases := []struct {
-		metadata       map[string]string
-		expectedResult bool
-	}{
-		{map[string]string{"content-type": "application/json"}, false},
-		{map[string]string{"cache-control": "private,no-store"}, true},
-		{map[string]string{"cache-control": "no-cache,must-revalidate"}, true},
-		{map[string]string{"cache-control": "no-transform"}, false},
-		{map[string]string{"cache-control": "max-age=3600"}, false},
-	}
-
-	for i, testCase := range testCases {
-		if filterFromCache(testCase.metadata) != testCase.expectedResult {
-			t.Errorf("Cache exclude directive test failed for case %d", i)
-		}
-	}
-}
@@ -23,7 +23,7 @@ import (
 // getDiskUsage walks the file tree rooted at root, calling usageFn
 // for each file or directory in the tree, including root.
 func getDiskUsage(ctx context.Context, root string, usageFn usageFunc) error {
-	return walk(ctx, root+slashSeparator, usageFn)
+	return walk(ctx, root+SlashSeparator, usageFn)
 }
 
 type usageFunc func(ctx context.Context, entry string) error
@@ -34,7 +34,7 @@ func walk(ctx context.Context, path string, usageFn usageFunc) error {
 		return err
 	}
 
-	if !hasSuffix(path, slashSeparator) {
+	if !hasSuffix(path, SlashSeparator) {
 		return nil
 	}
 
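Finally, a sketch of how the getDiskUsage/usageFunc pair shown in the last two hunks might be used (the accumulator and the per-entry os.Stat are assumptions, not code from this changeset): sum the sizes of all regular files under a cache root.

func exampleTotalDiskUsage(ctx context.Context, root string) (int64, error) {
	var total int64
	err := getDiskUsage(ctx, root, func(ctx context.Context, entry string) error {
		fi, err := os.Stat(entry)
		if err != nil {
			return err
		}
		if fi.Mode().IsRegular() {
			total += fi.Size()
		}
		return nil
	})
	return total, err
}
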
Some files were not shown because too many files have changed in this diff.