Compare commits
400 commits: RELEASE.20...RELEASE.20
3  .gitignore (vendored)

@@ -22,4 +22,5 @@ parts/
 prime/
 stage/
 .sia_temp/
 config.json
+healthcheck
31  .travis.yml

@@ -1,10 +1,4 @@
 go_import_path: github.com/minio/minio
-sudo: required
-
-services:
-  - docker
-
-dist: trusty
-
 language: go

@@ -18,20 +12,39 @@ branches:
matrix:
  include:
    - os: linux
      dist: trusty
      sudo: required
      env:
        - ARCH=x86_64
-     go: 1.10.3
+       - CGO_ENABLED=0
+     go: 1.11.4
      script:
        - make
        - diff -au <(gofmt -s -d cmd) <(printf "")
        - diff -au <(gofmt -s -d pkg) <(printf "")
-       - make test GOFLAGS="-timeout 15m -race -v"
+       - for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 15m "$d"; done
        - make verifiers
        - make crosscompile
        - make verify
        - make coverage
        - cd browser && yarn && yarn test && cd ..
+   - os: windows
+     env:
+       - ARCH=x86_64
+       - CGO_ENABLED=0
+     go: 1.11.4
+     script:
+       - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
+       - for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
+       - bash buildscripts/go-coverage.sh

before_script:
  # Add an IPv6 config - see the corresponding Travis issue
  # https://github.com/travis-ci/travis-ci/issues/8361
  - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
-  - nvm install stable
+  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi

after_success:
  - bash <(curl -s https://codecov.io/bash)
@@ -68,4 +68,4 @@ To remove a dependency
 - Run `make pkg-remove PKG=foo/bar` from top-level directory

 ### What are the coding guidelines for Minio?
-``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](slack.minio.io).
+``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.minio.io).
33  Dockerfile

@@ -1,34 +1,39 @@
FROM golang:1.10.1-alpine3.7
FROM golang:1.11.4-alpine3.7

LABEL maintainer="Minio Inc <dev@minio.io>"

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0

WORKDIR /go/src/github.com/minio/

RUN \
    apk add --no-cache git && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    go build -ldflags "-s -w" -o /usr/bin/healthcheck dockerscripts/healthcheck.go

FROM alpine:3.7

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key

WORKDIR /go/src/github.com/minio/
EXPOSE 9000

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' && \
    apk add --no-cache --virtual .build-deps git && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps

EXPOSE 9000
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=30s --timeout=5s \
    CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]

@@ -2,7 +2,7 @@ FROM alpine:3.7

 LABEL maintainer="Minio Inc <dev@minio.io>"

-COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
+COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck /usr/bin/
 COPY minio /usr/bin/

 ENV MINIO_UPDATE off

@@ -14,7 +14,7 @@ RUN \
 echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
 chmod +x /usr/bin/minio && \
 chmod +x /usr/bin/docker-entrypoint.sh && \
-chmod +x /usr/bin/healthcheck.sh
+chmod +x /usr/bin/healthcheck

 EXPOSE 9000

@@ -22,7 +22,6 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

 VOLUME ["/data"]

-HEALTHCHECK --interval=30s --timeout=5s \
-    CMD /usr/bin/healthcheck.sh
+HEALTHCHECK --interval=1m CMD healthcheck

 CMD ["minio"]

@@ -1,8 +1,22 @@
+FROM golang:1.11.4-alpine3.7
+
+ENV GOPATH /go
+ENV CGO_ENABLED 0
+
+WORKDIR /go/src/github.com/minio/
+
+RUN \
+    apk add --no-cache git && \
+    go get -v -d github.com/minio/minio && \
+    cd /go/src/github.com/minio/minio/dockerscripts && \
+    go build -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go
+
 FROM alpine:3.7

 LABEL maintainer="Minio Inc <dev@minio.io>"

-COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
+COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/

 ENV MINIO_UPDATE off
 ENV MINIO_ACCESS_KEY_FILE=access_key \

@@ -14,7 +28,7 @@ RUN \
 curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
 chmod +x /usr/bin/minio && \
 chmod +x /usr/bin/docker-entrypoint.sh && \
-chmod +x /usr/bin/healthcheck.sh
+chmod +x /usr/bin/healthcheck

 EXPOSE 9000

@@ -22,7 +36,6 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

 VOLUME ["/data"]

-HEALTHCHECK --interval=30s --timeout=5s \
-    CMD /usr/bin/healthcheck.sh
+HEALTHCHECK --interval=1m CMD healthcheck

 CMD ["minio"]
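The Dockerfiles above replace the shell-based healthcheck.sh with a small, statically built `healthcheck` binary compiled from dockerscripts/healthcheck.go and wired in via `HEALTHCHECK --interval=1m CMD healthcheck`. The contents of healthcheck.go are not part of this diff; the sketch below is only an illustration of what such a helper typically does, under the assumption that the server exposes a liveness endpoint at /minio/health/live on port 9000.

```go
// Hypothetical sketch of a container healthcheck helper; the real
// dockerscripts/healthcheck.go is not shown in this diff.
package main

import (
	"net/http"
	"os"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	// Assumed liveness path; adjust to whatever the server actually serves.
	resp, err := client.Get("http://127.0.0.1:9000/minio/health/live")
	if err != nil {
		os.Exit(1) // a non-zero exit marks the container unhealthy
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		os.Exit(1)
	}
	os.Exit(0)
}
```

Exiting non-zero is what makes Docker's HEALTHCHECK flag the container as unhealthy; no shell or curl is needed in the final image.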
72  Dockerfile.simpleci (new file)

@@ -0,0 +1,72 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.11.4

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq

RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'

RUN apt-get update && \
    apt-get -y install sudo
RUN touch /etc/sudoers

RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
RUN echo "ci ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN echo "Defaults env_reset" >> /etc/sudoers
RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go:/usr/local/go/bin"' >> /etc/sudoers

RUN mkdir -p /home/ci/.cache

RUN groupadd -g 999 ci && \
    useradd -r -u 999 -g ci ci

RUN chown -R ci:ci /go
RUN chown -R ci:ci /home/ci

USER ci

RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN make verifiers
RUN make crosscompile
RUN make coverage
RUN make verify

#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim

COPY browser /minio/browser
WORKDIR /minio/browser

RUN yarn
RUN yarn test

#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------

FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio ./minio
COPY buildscripts/gateway-tests.sh ./gateway-tests.sh
RUN apt-get update && apt-get install -y git wget jq curl dnsmasq

RUN wget https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz && \
    tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go

RUN mkdir -p /go
ENV GOPATH /go
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

RUN ./gateway-tests.sh
42  Makefile

@@ -13,41 +13,33 @@ checks:
 	@(env bash $(PWD)/buildscripts/checkgopath.sh)

 getdeps:
-	@echo "Installing golint" && go get -u github.com/golang/lint/golint
-	@echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
-	@echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
+	@echo "Installing golint" && go get -u golang.org/x/lint/golint
+	@echo "Installing staticcheck" && go get -u honnef.co/go/tools/...
 	@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
-	@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign

-verifiers: getdeps vet fmt lint cyclo deadcode spelling
+crosscompile:
+	@(env bash $(PWD)/buildscripts/cross-compile.sh)
+
+verifiers: getdeps vet fmt lint staticcheck spelling

 vet:
 	@echo "Running $@"
-	@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
-	@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
+	@go vet github.com/minio/minio/...

 fmt:
 	@echo "Running $@"
-	@gofmt -d cmd
-	@gofmt -d pkg
+	@gofmt -d cmd/
+	@gofmt -d pkg/

 lint:
 	@echo "Running $@"
-	@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
-	@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
+	@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
+	@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...

-ineffassign:
+staticcheck:
 	@echo "Running $@"
-	@${GOPATH}/bin/ineffassign .
-
-cyclo:
-	@echo "Running $@"
-	@${GOPATH}/bin/gocyclo -over 200 cmd
-	@${GOPATH}/bin/gocyclo -over 200 pkg
-
-deadcode:
-	@echo "Running $@"
-	@${GOPATH}/bin/deadcode -test $(shell go list ./...) || true
+	@${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
+	@${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

 spelling:
 	@${GOPATH}/bin/misspell -locale US -error `find cmd/`

@@ -60,7 +52,7 @@ spelling:
 check: test
 test: verifiers build
 	@echo "Running unit tests"
-	@go test $(GOFLAGS) -tags kqueue ./...
+	@CGO_ENABLED=0 go test -tags kqueue ./...

 verify: build
 	@echo "Verifying build"

@@ -73,7 +65,8 @@ coverage: build
 # Builds minio locally.
 build: checks
 	@echo "Building minio binary to './minio'"
-	@CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
+	@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
+	@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags="-s -w" -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go

 docker: build
 	@docker build -t $(TAG) . -f Dockerfile.dev

@@ -102,6 +95,7 @@ install: build
clean:
	@echo "Cleaning up all the generated files"
	@find . -name '*.test' | xargs rm -fv
	@find . -name '*~' | xargs rm -fv
	@rm -rvf minio
	@rm -rvf build
	@rm -rvf release
11  README.md

@@ -17,7 +17,7 @@ docker run -p 9000:9000 minio/minio server /data
 docker pull minio/minio:edge
 docker run -p 9000:9000 minio/minio:edge server /data
 ```
-Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
+Note: Docker will not display the autogenerated keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use autogenerated keys with containers. Please visit Minio Docker quickstart guide for more information [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)

 ## macOS
 ### Homebrew

@@ -53,6 +53,15 @@ chmod +x minio
 ./minio server /data
 ```

+| Platform| Architecture | URL|
+| ----------| -------- | ------|
+|GNU/Linux|ppc64le|https://dl.minio.io/server/minio/release/linux-ppc64le/minio |
+```sh
+wget https://dl.minio.io/server/minio/release/linux-ppc64le/minio
+chmod +x minio
+./minio server /data
+```
+
 ## Microsoft Windows
 ### Binary Download
 | Platform| Architecture | URL|
55  appveyor.yml (deleted)

@@ -1,55 +0,0 @@
# version format
version: "{build}"

# Operating system (build VM template)
os: Windows Server 2012 R2

# Platform.
platform: x64

clone_folder: c:\gopath\src\github.com\minio\minio

# Environment variables
environment:
  GOPATH: c:\gopath
  GOROOT: c:\go110

# scripts that run after cloning repository
install:
  - set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
  - go version
  - go env
  - python --version

# To run your custom scripts instead of automatic MSBuild
build_script:
  # Compile
  # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
  - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
  - appveyor AddCompilationMessage "Starting Compile"
  - cd c:\gopath\src\github.com\minio\minio
  - go run buildscripts/gen-ldflags.go > temp.txt
  - set /p BUILD_LDFLAGS=<temp.txt
  - go build -ldflags="%BUILD_LDFLAGS%" -o %GOPATH%\bin\minio.exe
  - appveyor AddCompilationMessage "Compile Success"

# To run your custom scripts instead of automatic tests
test_script:
  # Unit tests
  - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
  - mkdir build\coverage
  - for /f "" %%G in ('go list github.com/minio/minio/... ^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G )
  - go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
  - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed

after_test:
  - go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html
  - ps: Push-AppveyorArtifact build\coverage\coverage.txt
  - ps: Push-AppveyorArtifact build\coverage\coverage.html
  # Upload coverage report.
  - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
  - pip install codecov
  - codecov -X gcov -f "build\coverage\coverage.txt"

# to disable deployment
deploy: off
@@ -5,13 +5,13 @@
   <meta charset="UTF-8">
   <meta name="viewport" content="width=device-width, initial-scale=1">
   <title>Minio Browser</title>
-  <link rel="stylesheet" href="loader.css" type="text/css">
+  <link rel="stylesheet" href="/minio/loader.css" type="text/css">
 </head>

 <body>
   <div class="page-load">
     <div class="pl-inner">
-      <img src="logo.svg" alt="">
+      <img src="/minio/logo.svg" alt="">
     </div>
   </div>
   <div id="root"></div>

@@ -51,6 +51,6 @@
   <![endif]-->

   <script>currentUiVersion = 'MINIO_UI_VERSION'</script>
-  <script src="index_bundle.js"></script>
+  <script src="/minio/index_bundle.js"></script>
 </body>
 </html>
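The index.html changes above switch the relative asset references (loader.css, logo.svg, index_bundle.js) to absolute paths under /minio/, so the page still resolves its assets when the browser is opened at a nested route such as a bucket or prefix path. This is the usual pattern for a web UI mounted under a sub-path; the generic net/http sketch below only illustrates the idea and is not Minio's actual routing code.

```go
// Illustration only: serving UI assets under a "/minio/" prefix so that
// absolute paths like /minio/loader.css resolve from any page route.
package main

import (
	"log"
	"net/http"
)

func main() {
	assets := http.FileServer(http.Dir("./browser/production"))
	// Strip the prefix before looking files up on disk.
	http.Handle("/minio/", http.StripPrefix("/minio/", assets))
	log.Fatal(http.ListenAndServe(":9000", nil))
}
```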
@@ -67,7 +67,7 @@ export class ObjectActions extends React.Component {
           className="fiad-action"
           onClick={this.shareObject.bind(this)}
         >
-          <i className="fa fa-copy" />
+          <i className="fa fa-share-alt" />
         </a>
         <a
           href=""
@@ -26,6 +26,8 @@ jest.mock("../../web", () => ({
     .fn(() => true)
     .mockReturnValueOnce(true)
     .mockReturnValueOnce(false)
+    .mockReturnValueOnce(true)
+    .mockReturnValueOnce(true)
     .mockReturnValueOnce(false),
   ListObjects: jest.fn(({ bucketName }) => {
     if (bucketName === "test-deny") {

@@ -405,7 +407,7 @@ describe("Objects actions", () => {
     store.dispatch(actionsObjects.downloadObject("obj1"))
     const url = `${
       window.location.origin
-    }${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=''`
+    }${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=`
     expect(setLocation).toHaveBeenCalledWith(url)
   })
@@ -16,11 +16,7 @@
|
||||
|
||||
import web from "../web"
|
||||
import history from "../history"
|
||||
import {
|
||||
sortObjectsByName,
|
||||
sortObjectsBySize,
|
||||
sortObjectsByDate
|
||||
} from "../utils"
|
||||
import { sortObjectsByName, sortObjectsBySize, sortObjectsByDate } from "../utils"
|
||||
import { getCurrentBucket } from "../buckets/selectors"
|
||||
import { getCurrentPrefix, getCheckedList } from "./selectors"
|
||||
import * as alertActions from "../alert/actions"
|
||||
@@ -60,10 +56,7 @@ export const appendList = (objects, marker, isTruncated) => ({
|
||||
|
||||
export const fetchObjects = append => {
|
||||
return function(dispatch, getState) {
|
||||
const {
|
||||
buckets: { currentBucket },
|
||||
objects: { currentPrefix, marker }
|
||||
} = getState()
|
||||
const {buckets: {currentBucket}, objects: {currentPrefix, marker}} = getState()
|
||||
if (currentBucket) {
|
||||
return web
|
||||
.ListObjects({
|
||||
@@ -110,7 +103,7 @@ export const fetchObjects = append => {
|
||||
|
||||
export const sortObjects = sortBy => {
|
||||
return function(dispatch, getState) {
|
||||
const { objects } = getState()
|
||||
const {objects} = getState()
|
||||
const sortOrder = objects.sortBy == sortBy ? !objects.sortOrder : true
|
||||
dispatch(setSortBy(sortBy))
|
||||
dispatch(setSortOrder(sortOrder))
|
||||
@@ -210,30 +203,39 @@ export const shareObject = (object, days, hours, minutes) => {
|
||||
const currentPrefix = getCurrentPrefix(getState())
|
||||
const objectName = `${currentPrefix}${object}`
|
||||
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
|
||||
return web
|
||||
.PresignedGet({
|
||||
host: location.host,
|
||||
bucket: currentBucket,
|
||||
object: objectName,
|
||||
expiry
|
||||
})
|
||||
.then(obj => {
|
||||
dispatch(showShareObject(object, obj.url))
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "success",
|
||||
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
|
||||
})
|
||||
)
|
||||
})
|
||||
.catch(err => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
})
|
||||
)
|
||||
})
|
||||
if (web.LoggedIn()) {
|
||||
return web
|
||||
.PresignedGet({
|
||||
host: location.host,
|
||||
bucket: currentBucket,
|
||||
object: objectName
|
||||
})
|
||||
.then(obj => {
|
||||
dispatch(showShareObject(object, obj.url))
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "success",
|
||||
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
|
||||
})
|
||||
)
|
||||
})
|
||||
.catch(err => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
})
|
||||
)
|
||||
})
|
||||
} else {
|
||||
dispatch(showShareObject(object, `${location.host}` + '/' + `${currentBucket}` + '/' + encodeURI(objectName)))
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "success",
|
||||
message: `Object shared.`
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -279,7 +281,7 @@ export const downloadObject = object => {
|
||||
} else {
|
||||
const url = `${
|
||||
window.location.origin
|
||||
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=''`
|
||||
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
|
||||
window.location = url
|
||||
}
|
||||
}
|
||||
@@ -308,7 +310,7 @@ export const downloadCheckedObjects = () => {
|
||||
objects: getCheckedList(state)
|
||||
}
|
||||
if (!web.LoggedIn()) {
|
||||
const requestUrl = location.origin + "/minio/zip?token=''"
|
||||
const requestUrl = location.origin + "/minio/zip?token="
|
||||
downloadZip(requestUrl, req, dispatch)
|
||||
} else {
|
||||
return web
|
||||
@@ -319,14 +321,13 @@ export const downloadCheckedObjects = () => {
|
||||
}${minioBrowserPrefix}/zip?token=${res.token}`
|
||||
downloadZip(requestUrl, req, dispatch)
|
||||
})
|
||||
.catch(err =>
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
})
|
||||
)
|
||||
.catch(err => dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
})
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -349,8 +350,7 @@ const downloadZip = (url, req, dispatch) => {
|
||||
var separator = req.prefix.length > 1 ? "-" : ""
|
||||
|
||||
anchor.href = blobUrl
|
||||
anchor.download =
|
||||
req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
|
||||
anchor.download = req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
|
||||
|
||||
anchor.click()
|
||||
window.URL.revokeObjectURL(blobUrl)
|
||||
|
||||
@@ -13,8 +13,7 @@
|
||||
"setupTestFrameworkScriptFile": "./app/js/jest/setup.js",
|
||||
"testURL": "https://localhost:8080",
|
||||
"moduleNameMapper": {
|
||||
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$":
|
||||
"<rootDir>/app/js/jest/__mocks__/fileMock.js",
|
||||
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/app/js/jest/__mocks__/fileMock.js",
|
||||
"\\.(css|scss)$": "identity-obj-proxy"
|
||||
}
|
||||
},
|
||||
@@ -40,29 +39,31 @@
|
||||
"babel-preset-es2015": "^6.14.0",
|
||||
"babel-preset-react": "^6.11.1",
|
||||
"babel-register": "^6.14.0",
|
||||
"copy-webpack-plugin": "^0.3.3",
|
||||
"copy-webpack-plugin": "^4.6.0",
|
||||
"css-loader": "^0.23.1",
|
||||
"enzyme": "^3.3.0",
|
||||
"enzyme-adapter-react-16": "^1.1.1",
|
||||
"esformatter": "^0.10.0",
|
||||
"esformatter-jsx": "^7.4.1",
|
||||
"esformatter-jsx-ignore": "^1.0.6",
|
||||
"html-webpack-plugin": "^2.30.1",
|
||||
"html-webpack-plugin": "^3.2.0",
|
||||
"jest": "^22.1.4",
|
||||
"jest-enzyme": "^4.0.2",
|
||||
"json-loader": "^0.5.4",
|
||||
"less": "^2.7.1",
|
||||
"less-loader": "^2.2.3",
|
||||
"purifycss-webpack-plugin": "^2.0.3",
|
||||
"less": "^3.9.0",
|
||||
"less-loader": "^4.1.0",
|
||||
"purgecss-webpack-plugin": "^1.4.0",
|
||||
"style-loader": "^0.13.1",
|
||||
"url-loader": "^0.5.7",
|
||||
"webpack-dev-server": "^2.11.1"
|
||||
"webpack-cli": "^3.2.0",
|
||||
"webpack-dev-server": "^3.1.14"
|
||||
},
|
||||
"dependencies": {
|
||||
"bootstrap": "^3.3.6",
|
||||
"classnames": "^2.2.3",
|
||||
"expect": "^1.20.2",
|
||||
"font-awesome": "^4.7.0",
|
||||
"glob-all": "^3.1.0",
|
||||
"history": "^4.7.2",
|
||||
"humanize": "0.0.9",
|
||||
"identity-obj-proxy": "^3.0.0",
|
||||
@@ -89,6 +90,6 @@
|
||||
"reselect": "^3.0.1",
|
||||
"superagent": "^3.8.2",
|
||||
"superagent-es6-promise": "^1.0.0",
|
||||
"webpack": "^3.10.0"
|
||||
"webpack": "^4.28.3"
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -16,11 +16,13 @@
|
||||
|
||||
var webpack = require('webpack')
|
||||
var path = require('path')
|
||||
var glob = require('glob-all')
|
||||
var CopyWebpackPlugin = require('copy-webpack-plugin')
|
||||
var purify = require("purifycss-webpack-plugin")
|
||||
var PurgecssPlugin = require('purgecss-webpack-plugin')
|
||||
|
||||
var exports = {
|
||||
context: __dirname,
|
||||
mode: 'development',
|
||||
entry: [
|
||||
path.resolve(__dirname, 'app/index.js')
|
||||
],
|
||||
@@ -99,12 +101,11 @@ var exports = {
|
||||
{from: 'app/index.html'}
|
||||
]),
|
||||
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
|
||||
new purify({
|
||||
basePath: __dirname,
|
||||
paths: [
|
||||
"app/index.html",
|
||||
"app/js/*.js"
|
||||
]
|
||||
new PurgecssPlugin({
|
||||
paths: glob.sync([
|
||||
path.join(__dirname, 'app/index.html'),
|
||||
path.join(__dirname, 'app/js/*.js')
|
||||
])
|
||||
})
|
||||
]
|
||||
}
|
||||
|
||||
@@ -16,11 +16,13 @@
|
||||
|
||||
var webpack = require('webpack')
|
||||
var path = require('path')
|
||||
var glob = require('glob-all')
|
||||
var CopyWebpackPlugin = require('copy-webpack-plugin')
|
||||
var purify = require("purifycss-webpack-plugin")
|
||||
var PurgecssPlugin = require('purgecss-webpack-plugin')
|
||||
|
||||
var exports = {
|
||||
context: __dirname,
|
||||
mode: 'production',
|
||||
entry: [
|
||||
path.resolve(__dirname, 'app/index.js')
|
||||
],
|
||||
@@ -74,16 +76,12 @@ var exports = {
|
||||
{from: 'app/img/logo.svg'},
|
||||
{from: 'app/index.html'}
|
||||
]),
|
||||
new webpack.DefinePlugin({
|
||||
'process.env.NODE_ENV': '"production"'
|
||||
}),
|
||||
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
|
||||
new purify({
|
||||
basePath: __dirname,
|
||||
paths: [
|
||||
"app/index.html",
|
||||
"app/js/*.js"
|
||||
]
|
||||
new PurgecssPlugin({
|
||||
paths: glob.sync([
|
||||
path.join(__dirname, 'app/index.html'),
|
||||
path.join(__dirname, 'app/js/*.js')
|
||||
])
|
||||
})
|
||||
]
|
||||
}
|
||||
|
||||
3591  browser/yarn.lock (diff suppressed because it is too large)
35  buildscripts/cross-compile.sh (new executable file)

@@ -0,0 +1,35 @@
#!/bin/bash

# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -ex

function _init() {
    ## All binaries are static make sure to disable CGO.
    export CGO_ENABLED=0

    ## List of architectures and OS to test coss compilation.
    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
}

function _build_and_sign() {
    local osarch=$1
    IFS=/ read -r -a arr <<<"$osarch"
    os="${arr[0]}"
    arch="${arr[1]}"
    package=$(go list -f '{{.ImportPath}}')
    printf -- "--> %15s:%s\n" "${osarch}" "${package}"

    # Go build to build the binary.
    export GOOS=$os
    export GOARCH=$arch
    go build -tags kqueue -o /dev/null
}

function main() {
    echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
    for each_osarch in ${SUPPORTED_OSARCH}; do
        _build_and_sign "${each_osarch}"
    done
}

_init && main "$@"
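The new cross-compile.sh script (invoked by the `make crosscompile` target added above) smoke-tests the build for several OS/arch combinations by compiling to /dev/null with CGO disabled. For illustration only, the same loop can be written as a small Go program; the OS/arch list below simply mirrors SUPPORTED_OSARCH from the script.

```go
// Sketch of the cross-compilation smoke test performed by
// buildscripts/cross-compile.sh, expressed in Go for illustration.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	osarches := []string{
		"linux/ppc64le", "linux/arm64", "linux/s390x",
		"darwin/amd64", "freebsd/amd64",
	}
	for _, oa := range osarches {
		parts := strings.SplitN(oa, "/", 2)
		cmd := exec.Command("go", "build", "-tags", "kqueue", "-o", os.DevNull)
		// Static builds: disable CGO and target the given OS/arch.
		cmd.Env = append(os.Environ(),
			"CGO_ENABLED=0", "GOOS="+parts[0], "GOARCH="+parts[1])
		cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
		if err := cmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "build failed for %s: %v\n", oa, err)
			os.Exit(1)
		}
	}
}
```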
61
buildscripts/gateway-tests.sh
Executable file
61
buildscripts/gateway-tests.sh
Executable file
@@ -0,0 +1,61 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Minio Cloud Storage, (C) 2019 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
set -E
|
||||
set -o pipefail
|
||||
|
||||
export SERVER_ENDPOINT=127.0.0.1:24240
|
||||
export ENABLE_HTTPS=0
|
||||
export ACCESS_KEY=minio
|
||||
export SECRET_KEY=minio123
|
||||
export MINIO_ACCESS_KEY=minio
|
||||
export MINIO_SECRET_KEY=minio123
|
||||
export AWS_ACCESS_KEY_ID=minio
|
||||
export AWS_SECRET_ACCESS_KEY=minio123
|
||||
|
||||
trap "cat server.log;cat gateway.log" SIGHUP SIGINT SIGTERM
|
||||
|
||||
./minio --quiet --json server data --address 127.0.0.1:24242 > server.log &
|
||||
sleep 3
|
||||
./minio --quiet --json gateway s3 http://127.0.0.1:24242 --address 127.0.0.1:24240 > gateway.log &
|
||||
sleep 3
|
||||
|
||||
mkdir -p /mint
|
||||
git clone https://github.com/minio/mint /mint
|
||||
cd /mint
|
||||
|
||||
export MINT_ROOT_DIR=${MINT_ROOT_DIR:-/mint}
|
||||
export MINT_RUN_CORE_DIR="$MINT_ROOT_DIR/run/core"
|
||||
export MINT_RUN_SECURITY_DIR="$MINT_ROOT_DIR/run/security"
|
||||
export MINT_MODE="full"
|
||||
export WGET="wget --quiet --no-check-certificate"
|
||||
|
||||
go get github.com/go-ini/ini
|
||||
|
||||
./create-data-files.sh
|
||||
./preinstall.sh
|
||||
|
||||
# install mint app packages
|
||||
for pkg in "build"/*/install.sh; do
|
||||
echo "Running $pkg"
|
||||
$pkg
|
||||
done
|
||||
|
||||
./postinstall.sh
|
||||
|
||||
/mint/entrypoint.sh || cat server.log gateway.log fail
|
||||
@@ -4,7 +4,7 @@ set -e
 echo "" > coverage.txt

 for d in $(go list ./... | grep -v browser); do
-    go test -coverprofile=profile.out -covermode=atomic "$d"
+    CGO_ENABLED=0 go test -v -coverprofile=profile.out -covermode=atomic "$d"
     if [ -f profile.out ]; then
         cat profile.out >> coverage.txt
         rm profile.out
@@ -68,6 +68,36 @@ function start_minio_erasure_sets()
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function start_minio_dist_erasure_sets_ipv6()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
|
||||
minio_pids[2]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
|
||||
minio_pids[3]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
|
||||
minio_pids[4]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
|
||||
minio_pids[5]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
|
||||
minio_pids[6]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
|
||||
minio_pids[7]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
|
||||
minio_pids[8]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
|
||||
minio_pids[9]=$!
|
||||
|
||||
sleep 35
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
function start_minio_dist_erasure_sets()
|
||||
{
|
||||
declare -a minio_pids
|
||||
@@ -161,6 +191,34 @@ function run_test_erasure_sets() {
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_dist_erasure_sets_ipv6()
|
||||
{
|
||||
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
|
||||
|
||||
export SERVER_ENDPOINT="[::1]:9000"
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
kill "$pid"
|
||||
done
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
for i in $(seq 0 9); do
|
||||
echo "server$i log:"
|
||||
cat "$WORK_DIR/dist-minio-v6-900$i.log"
|
||||
done
|
||||
fi
|
||||
|
||||
for i in $(seq 0 9); do
|
||||
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
|
||||
done
|
||||
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_dist_erasure_sets()
|
||||
{
|
||||
minio_pids=( $(start_minio_dist_erasure_sets) )
|
||||
@@ -237,6 +295,7 @@ function run_test_gateway_s3()
|
||||
{
|
||||
minio_pid="$(start_minio_gateway_s3)"
|
||||
|
||||
export SERVER_ENDPOINT="127.0.0.1:9000"
|
||||
export ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
|
||||
export SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
@@ -279,6 +338,7 @@ function __init__()
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
|
||||
chmod a+x "$FUNCTIONAL_TESTS"
|
||||
}
|
||||
|
||||
@@ -319,6 +379,13 @@ function main()
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure setup as sets with ipv6"
|
||||
if ! run_test_dist_erasure_sets_ipv6; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Gateway S3 setup"
|
||||
if ! run_test_gateway_s3; then
|
||||
echo "FAILED"
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
|
||||
@@ -56,26 +57,28 @@ type accessControlPolicy struct {
|
||||
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketACL")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getBucketACL if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -89,7 +92,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
|
||||
Permission: "FULL_CONTROL",
|
||||
})
|
||||
if err := xml.NewEncoder(w).Encode(acl); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -103,27 +106,29 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
|
||||
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetObjectACL")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getObjectACL if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if object exists.
|
||||
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -137,7 +142,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
|
||||
Permission: "FULL_CONTROL",
|
||||
})
|
||||
if err := xml.NewEncoder(w).Encode(acl); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
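The GetBucketACLHandler and GetObjectACLHandler shown above are dummy implementations: after authorizing the request (re-using the bucket-policy action) and checking that the bucket or object exists, they always return a single FULL_CONTROL grant for the owner, since ACLs are not actually stored. The client-side usage sketch below is not part of this change set; the endpoint and credentials are placeholders for a locally running server.

```go
// Usage sketch: exercising the dummy GetBucketAcl handler with aws-sdk-go.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://127.0.0.1:9000"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("minio", "minio123", ""),
		S3ForcePathStyle: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// The handler always reports FULL_CONTROL for the bucket owner.
	out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{Bucket: aws.String("testbucket")})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range out.Grants {
		fmt.Println(aws.StringValue(g.Permission))
	}
}
```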
File diff suppressed because it is too large
@@ -27,8 +27,8 @@ import (
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
@@ -37,177 +37,203 @@ import (
|
||||
|
||||
var (
|
||||
configJSON = []byte(`{
|
||||
"version": "29",
|
||||
"credential": {
|
||||
"accessKey": "minio",
|
||||
"secretKey": "minio123"
|
||||
"version": "33",
|
||||
"credential": {
|
||||
"accessKey": "minio",
|
||||
"secretKey": "minio123"
|
||||
},
|
||||
"region": "us-east-1",
|
||||
"worm": "off",
|
||||
"storageclass": {
|
||||
"standard": "",
|
||||
"rrs": ""
|
||||
},
|
||||
"cache": {
|
||||
"drives": [],
|
||||
"expiry": 90,
|
||||
"maxuse": 80,
|
||||
"exclude": []
|
||||
},
|
||||
"kms": {
|
||||
"vault": {
|
||||
"endpoint": "",
|
||||
"auth": {
|
||||
"type": "",
|
||||
"approle": {
|
||||
"id": "",
|
||||
"secret": ""
|
||||
}
|
||||
},
|
||||
"key-id": {
|
||||
"name": "",
|
||||
"version": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"notify": {
|
||||
"amqp": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"url": "",
|
||||
"exchange": "",
|
||||
"routingKey": "",
|
||||
"exchangeType": "",
|
||||
"deliveryMode": 0,
|
||||
"mandatory": false,
|
||||
"immediate": false,
|
||||
"durable": false,
|
||||
"internal": false,
|
||||
"noWait": false,
|
||||
"autoDeleted": false
|
||||
}
|
||||
},
|
||||
"elasticsearch": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"url": "",
|
||||
"index": ""
|
||||
}
|
||||
},
|
||||
"kafka": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"brokers": null,
|
||||
"topic": "",
|
||||
"tls": {
|
||||
"enable": false,
|
||||
"skipVerify": false,
|
||||
"clientAuth": 0
|
||||
},
|
||||
"sasl": {
|
||||
"enable": false,
|
||||
"username": "",
|
||||
"password": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"mqtt": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"broker": "",
|
||||
"topic": "",
|
||||
"qos": 0,
|
||||
"username": "",
|
||||
"password": "",
|
||||
"reconnectInterval": 0,
|
||||
"keepAliveInterval": 0,
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"mysql": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"dsnString": "",
|
||||
"table": "",
|
||||
"host": "",
|
||||
"port": "",
|
||||
"user": "",
|
||||
"password": "",
|
||||
"database": ""
|
||||
}
|
||||
},
|
||||
"nats": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"address": "",
|
||||
"subject": "",
|
||||
"username": "",
|
||||
"password": "",
|
||||
"token": "",
|
||||
"secure": false,
|
||||
"pingInterval": 0,
|
||||
"streaming": {
|
||||
"enable": false,
|
||||
"clusterID": "",
|
||||
"async": false,
|
||||
"maxPubAcksInflight": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"region": "",
|
||||
"worm": "off",
|
||||
"storageclass": {
|
||||
"standard": "",
|
||||
"rrs": ""
|
||||
},
|
||||
"cache": {
|
||||
"drives": [],
|
||||
"expiry": 90,
|
||||
"maxuse": 80,
|
||||
"exclude": []
|
||||
},
|
||||
"kms": {
|
||||
"vault": {
|
||||
"endpoint": "",
|
||||
"auth": {
|
||||
"type": "",
|
||||
"approle": {
|
||||
"id": "",
|
||||
"secret": ""
|
||||
}
|
||||
},
|
||||
"key-id": {
|
||||
"name": "",
|
||||
"version": 0
|
||||
}
|
||||
"nsq": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"nsqdAddress": "",
|
||||
"topic": "",
|
||||
"tls": {
|
||||
"enable": false,
|
||||
"skipVerify": false
|
||||
}
|
||||
},
|
||||
"notify": {
|
||||
"amqp": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"url": "",
|
||||
"exchange": "",
|
||||
"routingKey": "",
|
||||
"exchangeType": "",
|
||||
"deliveryMode": 0,
|
||||
"mandatory": false,
|
||||
"immediate": false,
|
||||
"durable": false,
|
||||
"internal": false,
|
||||
"noWait": false,
|
||||
"autoDeleted": false
|
||||
}
|
||||
},
|
||||
"elasticsearch": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "",
|
||||
"url": "",
|
||||
"index": ""
|
||||
}
|
||||
},
|
||||
"kafka": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"brokers": null,
|
||||
"topic": "",
|
||||
"tls" : {
|
||||
"enable" : false,
|
||||
"skipVerify" : false,
|
||||
"clientAuth" : 0
|
||||
},
|
||||
"sasl" : {
|
||||
"enable" : false,
|
||||
"username" : "",
|
||||
"password" : ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"mqtt": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"broker": "",
|
||||
"topic": "",
|
||||
"qos": 0,
|
||||
"clientId": "",
|
||||
"username": "",
|
||||
"password": "",
|
||||
"reconnectInterval": 0,
|
||||
"keepAliveInterval": 0
|
||||
}
|
||||
},
|
||||
"mysql": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "",
|
||||
"dsnString": "",
|
||||
"table": "",
|
||||
"host": "",
|
||||
"port": "",
|
||||
"user": "",
|
||||
"password": "",
|
||||
"database": ""
|
||||
}
|
||||
},
|
||||
"nats": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"address": "",
|
||||
"subject": "",
|
||||
"username": "",
|
||||
"password": "",
|
||||
"token": "",
|
||||
"secure": false,
|
||||
"pingInterval": 0,
|
||||
"streaming": {
|
||||
"enable": false,
|
||||
"clusterID": "",
|
||||
"clientID": "",
|
||||
"async": false,
|
||||
"maxPubAcksInflight": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"postgresql": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "",
|
||||
"connectionString": "",
|
||||
"table": "",
|
||||
"host": "",
|
||||
"port": "",
|
||||
"user": "",
|
||||
"password": "",
|
||||
"database": ""
|
||||
}
|
||||
},
|
||||
"redis": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "",
|
||||
"address": "",
|
||||
"password": "",
|
||||
"key": ""
|
||||
}
|
||||
},
|
||||
"webhook": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"endpoint": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"logger": {
|
||||
"console": {
|
||||
"enabled": true
|
||||
},
|
||||
"http": {
|
||||
"1": {
|
||||
"enabled": false,
|
||||
"endpoint": "http://user:example@localhost:9001/api/endpoint"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}`)
|
||||
}
|
||||
},
|
||||
"postgresql": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"connectionString": "",
|
||||
"table": "",
|
||||
"host": "",
|
||||
"port": "",
|
||||
"user": "",
|
||||
"password": "",
|
||||
"database": ""
|
||||
}
|
||||
},
|
||||
"redis": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"address": "",
|
||||
"password": "",
|
||||
"key": ""
|
||||
}
|
||||
},
|
||||
"webhook": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"endpoint": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"logger": {
|
||||
"console": {
|
||||
"enabled": true
|
||||
},
|
||||
"http": {
|
||||
"1": {
|
||||
"enabled": false,
|
||||
"endpoint": "https://username:password@example.com/api"
|
||||
}
|
||||
}
|
||||
},
|
||||
"compress": {
|
||||
"enabled": false,
|
||||
"extensions":[".txt",".log",".csv",".json"],
|
||||
"mime-types":["text/csv","text/plain","application/json"]
|
||||
},
|
||||
"openid": {
|
||||
"jwks": {
|
||||
"url": ""
|
||||
}
|
||||
},
|
||||
"policy": {
|
||||
"opa": {
|
||||
"url": "",
|
||||
"authToken": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
`)
|
||||
)
|
||||
|
||||
// adminXLTestBed - encapsulates subsystems that need to be setup for
// admin-handler unit tests.
type adminXLTestBed struct {
configPath string
xlDirs []string
objLayer ObjectLayer
router *mux.Router
xlDirs []string
objLayer ObjectLayer
router *mux.Router
}

// prepareAdminXLTestBed - helper function that setups a single-node
@@ -243,14 +269,20 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
|
||||
// Init global heal state
|
||||
initAllHealState(globalIsXL)
|
||||
|
||||
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
|
||||
globalConfigSys = NewConfigSys()
|
||||
|
||||
globalIAMSys = NewIAMSys()
|
||||
globalIAMSys.Init(objLayer)
|
||||
|
||||
// Create new policy system.
|
||||
globalPolicySys = NewPolicySys()
|
||||
globalPolicySys.Init(objLayer)
|
||||
|
||||
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
|
||||
globalNotificationSys.Init(objLayer)
|
||||
|
||||
// Setup admin mgmt REST API handlers.
|
||||
adminRouter := mux.NewRouter()
|
||||
registerAdminRouter(adminRouter)
|
||||
registerAdminRouter(adminRouter, true, true)
|
||||
|
||||
return &adminXLTestBed{
|
||||
xlDirs: xlDirs,
|
||||
@@ -281,8 +313,8 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
objectName := fmt.Sprintf("%s-%d", objName, i)
|
||||
_, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
|
||||
mustGetHashReader(t, bytes.NewReader([]byte("hello")),
|
||||
int64(len("hello")), "", ""), nil, ObjectOptions{})
|
||||
mustGetPutObjReader(t, bytes.NewReader([]byte("hello")),
|
||||
int64(len("hello")), "", ""), ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create %s - %v", objectName,
|
||||
err)
|
||||
@@ -294,13 +326,13 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
|
||||
{
|
||||
objName := "mpObject"
|
||||
uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
|
||||
objName, nil, ObjectOptions{})
|
||||
objName, ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("mp new error: %v", err)
|
||||
}
|
||||
|
||||
_, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
|
||||
uploadID, 3, mustGetHashReader(t, bytes.NewReader(
|
||||
uploadID, 3, mustGetPutObjReader(t, bytes.NewReader(
|
||||
[]byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("mp put error: %v", err)
|
||||
@@ -480,6 +512,8 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials, body []byte) (*htt

// Set body
req.Body = ioutil.NopCloser(bytes.NewReader(body))
req.ContentLength = int64(len(body))

// Set sha-sum header
req.Header.Set("X-Amz-Content-Sha256", getSHA256Hash(body))

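The hunk above attaches an explicit request body and content length before the payload hash header is set. A rough self-contained sketch of the same pattern using only the standard library (the URL is an illustrative placeholder, and getSHA256Hash is replaced by a direct SHA-256 computation):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	body := []byte(`{"action":"restart"}`)

	req, err := http.NewRequest(http.MethodPost,
		"http://127.0.0.1:9000/minio/admin/v1/service", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}

	// Mirror what getServiceCmdRequest does: attach the body, record its
	// length, and advertise its SHA-256 so signature verification can
	// validate the payload.
	req.Body = ioutil.NopCloser(bytes.NewReader(body))
	req.ContentLength = int64(len(body))

	sum := sha256.Sum256(body)
	req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))

	fmt.Println(req.Header.Get("X-Amz-Content-Sha256"))
}
```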
@@ -504,17 +538,22 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
|
||||
// single node setup, this degenerates to a simple function
|
||||
// call under the hood.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Setting up a go routine to simulate ServerRouter's
|
||||
// handleServiceSignals for stop and restart commands.
|
||||
if cmd == restartCmd {
|
||||
go testServiceSignalReceiver(cmd, t)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
testServiceSignalReceiver(cmd, t)
|
||||
}()
|
||||
}
|
||||
credentials := globalServerConfig.GetCredential()
|
||||
|
||||
body, err := json.Marshal(madmin.ServiceAction{
|
||||
cmd.toServiceActionValue()})
|
||||
Action: cmd.toServiceActionValue()})
|
||||
if err != nil {
|
||||
t.Fatalf("JSONify error: %v", err)
|
||||
}
|
||||
@@ -545,6 +584,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}

// Wait until testServiceSignalReceiver() called in a goroutine quits.
wg.Wait()
}

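The two hunks above wrap the signal-receiver goroutine in a sync.WaitGroup so the test only returns once the receiver has actually exited. A minimal standalone sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Launch the background worker and make sure the caller waits for it,
	// mirroring how testServicesCmdHandler now waits on
	// testServiceSignalReceiver before returning.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("signal receiver running")
	}()

	// ... send the service command and check the response here ...

	// Block until the goroutine above has finished.
	wg.Wait()
}
```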
// Test for service status management REST API.
|
||||
@@ -569,7 +611,6 @@ func TestServiceSetCreds(t *testing.T) {
|
||||
// single node setup, this degenerates to a simple function
|
||||
// call under the hood.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
|
||||
|
||||
credentials := globalServerConfig.GetCredential()
|
||||
|
||||
@@ -601,7 +642,7 @@ func TestServiceSetCreds(t *testing.T) {
t.Fatalf("JSONify err: %v", err)
}

ebody, err := madmin.EncryptServerConfigData(credentials.SecretKey, body)
ebody, err := madmin.EncryptData(credentials.SecretKey, body)
if err != nil {
t.Fatal(err)
}
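These tests switch from madmin.EncryptServerConfigData to the more general madmin.EncryptData helper. A rough sketch of how a client encrypts a payload before sending it to the admin API; the import path follows where madmin lives in this tree, and the payload is only an example:

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	secretKey := "minio123"               // admin credential used as the passphrase
	payload := []byte(`{"version":"33"}`) // config (or credential) JSON to protect

	// EncryptData derives a key from the secret and returns an encrypted
	// blob that the admin handlers decrypt on the server side.
	encrypted, err := madmin.EncryptData(secretKey, payload)
	if err != nil {
		panic(err)
	}

	fmt.Printf("encrypted %d bytes into %d bytes\n", len(payload), len(encrypted))
}
```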
@@ -668,7 +709,6 @@ func TestGetConfigHandler(t *testing.T) {
|
||||
|
||||
// Initialize admin peers to make admin RPC calls.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
|
||||
|
||||
// Prepare query params for get-config mgmt REST API.
|
||||
queryVal := url.Values{}
|
||||
@@ -697,18 +737,13 @@ func TestSetConfigHandler(t *testing.T) {
|
||||
|
||||
// Initialize admin peers to make admin RPC calls.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
|
||||
|
||||
// SetConfigHandler restarts minio setup - need to start a
|
||||
// signal receiver to receive on globalServiceSignalCh.
|
||||
go testServiceSignalReceiver(restartCmd, t)
|
||||
|
||||
// Prepare query params for set-config mgmt REST API.
|
||||
queryVal := url.Values{}
|
||||
queryVal.Set("config", "")
|
||||
|
||||
password := globalServerConfig.GetCredential().SecretKey
|
||||
econfigJSON, err := madmin.EncryptServerConfigData(password, configJSON)
|
||||
econfigJSON, err := madmin.EncryptData(password, configJSON)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -722,13 +757,13 @@ func TestSetConfigHandler(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Errorf("Expected to succeed but failed with %d", rec.Code)
|
||||
t.Errorf("Expected to succeed but failed with %d, body: %s", rec.Code, rec.Body)
|
||||
}
|
||||
|
||||
// Check that a very large config file returns an error.
|
||||
{
|
||||
// Make a large enough config string
|
||||
invalidCfg := []byte(strings.Repeat("A", maxConfigJSONSize+1))
|
||||
invalidCfg := []byte(strings.Repeat("A", maxEConfigJSONSize+1))
|
||||
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
|
||||
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
|
||||
if err != nil {
|
||||
@@ -737,7 +772,7 @@ func TestSetConfigHandler(t *testing.T) {
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
respBody := string(rec.Body.Bytes())
|
||||
respBody := rec.Body.String()
|
||||
if rec.Code != http.StatusBadRequest ||
|
||||
!strings.Contains(respBody, "Configuration data provided exceeds the allowed maximum of") {
|
||||
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
|
||||
@@ -756,9 +791,9 @@ func TestSetConfigHandler(t *testing.T) {
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
respBody := string(rec.Body.Bytes())
|
||||
respBody := rec.Body.String()
|
||||
if rec.Code != http.StatusBadRequest ||
|
||||
!strings.Contains(respBody, "JSON configuration provided has objects with duplicate keys") {
|
||||
!strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
|
||||
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
|
||||
}
|
||||
}
|
||||
@@ -773,7 +808,6 @@ func TestAdminServerInfo(t *testing.T) {
|
||||
|
||||
// Initialize admin peers to make admin RPC calls.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
|
||||
|
||||
// Prepare query params for set-config mgmt REST API.
|
||||
queryVal := url.Values{}
|
||||
@@ -813,8 +847,8 @@ func TestAdminServerInfo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestToAdminAPIErr - test for toAdminAPIErr helper function.
|
||||
func TestToAdminAPIErr(t *testing.T) {
|
||||
// TestToAdminAPIErrCode - test for toAdminAPIErrCode helper function.
|
||||
func TestToAdminAPIErrCode(t *testing.T) {
|
||||
testCases := []struct {
|
||||
err error
|
||||
expectedAPIErr APIErrorCode
|
||||
@@ -832,180 +866,15 @@ func TestToAdminAPIErr(t *testing.T) {
|
||||
// 3. Non-admin API specific error.
|
||||
{
|
||||
err: errDiskNotFound,
|
||||
expectedAPIErr: toAPIErrorCode(errDiskNotFound),
|
||||
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
actualErr := toAdminAPIErrCode(test.err)
|
||||
actualErr := toAdminAPIErrCode(context.Background(), test.err)
|
||||
if actualErr != test.expectedAPIErr {
|
||||
t.Errorf("Test %d: Expected %v but received %v",
|
||||
i+1, test.expectedAPIErr, actualErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mkHealStartReq(t *testing.T, bucket, prefix string,
|
||||
opts madmin.HealOpts) *http.Request {
|
||||
|
||||
body, err := json.Marshal(opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable marshal heal opts")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket)
|
||||
if bucket != "" && prefix != "" {
|
||||
path += "/" + prefix
|
||||
}
|
||||
|
||||
req, err := newTestRequest("POST", path,
|
||||
int64(len(body)), bytes.NewReader(body))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct request - %v", err)
|
||||
}
|
||||
cred := globalServerConfig.GetCredential()
|
||||
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to sign request - %v", err)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
func mkHealStatusReq(t *testing.T, bucket, prefix,
|
||||
clientToken string) *http.Request {
|
||||
|
||||
path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket)
|
||||
if bucket != "" && prefix != "" {
|
||||
path += "/" + prefix
|
||||
}
|
||||
path += fmt.Sprintf("?clientToken=%s", clientToken)
|
||||
|
||||
req, err := newTestRequest("POST", path, 0, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct request - %v", err)
|
||||
}
|
||||
cred := globalServerConfig.GetCredential()
|
||||
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to sign request - %v", err)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
func collectHealResults(t *testing.T, adminTestBed *adminXLTestBed, bucket,
|
||||
prefix, clientToken string, timeLimitSecs int) madmin.HealTaskStatus {
|
||||
|
||||
var res, cur madmin.HealTaskStatus
|
||||
|
||||
// loop and fetch heal status. have a time-limit to loop over
|
||||
// all statuses.
|
||||
timeLimit := UTCNow().Add(time.Second * time.Duration(timeLimitSecs))
|
||||
for cur.Summary != healStoppedStatus && cur.Summary != healFinishedStatus {
|
||||
if UTCNow().After(timeLimit) {
|
||||
t.Fatalf("heal-status loop took too long - clientToken: %s", clientToken)
|
||||
}
|
||||
req := mkHealStatusReq(t, bucket, prefix, clientToken)
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
if http.StatusOK != rec.Code {
|
||||
t.Errorf("Unexpected status code - got %d but expected %d",
|
||||
rec.Code, http.StatusOK)
|
||||
break
|
||||
}
|
||||
err := json.NewDecoder(rec.Body).Decode(&cur)
|
||||
if err != nil {
|
||||
t.Errorf("unable to unmarshal resp: %v", err)
|
||||
break
|
||||
}
|
||||
|
||||
// all results are accumulated into a slice
|
||||
// and returned to caller in the end
|
||||
allItems := append(res.Items, cur.Items...)
|
||||
res = cur
|
||||
res.Items = allItems
|
||||
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func TestHealStartNStatusHandler(t *testing.T) {
|
||||
adminTestBed, err := prepareAdminXLTestBed()
|
||||
if err != nil {
|
||||
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
|
||||
}
|
||||
defer adminTestBed.TearDown()
|
||||
|
||||
// gen. test data
|
||||
adminTestBed.GenerateHealTestData(t)
|
||||
defer adminTestBed.CleanupHealTestData(t)
|
||||
|
||||
// Prepare heal-start request to send to the server.
|
||||
healOpts := madmin.HealOpts{
|
||||
Recursive: true,
|
||||
DryRun: false,
|
||||
}
|
||||
bucketName, objName := "mybucket", "myobject-0"
|
||||
var hss madmin.HealStartSuccess
|
||||
|
||||
{
|
||||
req := mkHealStartReq(t, bucketName, objName, healOpts)
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
if http.StatusOK != rec.Code {
|
||||
t.Errorf("Unexpected status code - got %d but expected %d",
|
||||
rec.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
err = json.Unmarshal(rec.Body.Bytes(), &hss)
|
||||
if err != nil {
|
||||
t.Fatal("unable to unmarshal response")
|
||||
}
|
||||
|
||||
if hss.ClientToken == "" {
|
||||
t.Errorf("unexpected result")
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// test with an invalid client token
|
||||
req := mkHealStatusReq(t, bucketName, objName, hss.ClientToken+hss.ClientToken)
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusBadRequest {
|
||||
t.Errorf("Unexpected status code")
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// fetch heal status
|
||||
results := collectHealResults(t, adminTestBed, bucketName,
|
||||
objName, hss.ClientToken, 5)
|
||||
|
||||
// check if we got back an expected record
|
||||
foundIt := false
|
||||
for _, item := range results.Items {
|
||||
if item.Type == madmin.HealItemObject &&
|
||||
item.Bucket == bucketName && item.Object == objName {
|
||||
foundIt = true
|
||||
}
|
||||
}
|
||||
if !foundIt {
|
||||
t.Error("did not find expected heal record in heal results")
|
||||
}
|
||||
|
||||
// check that the heal settings in the results is the
|
||||
// same as what we started the heal seq. with.
|
||||
if results.HealSettings != healOpts {
|
||||
t.Errorf("unexpected heal settings: %v",
|
||||
results.HealSettings)
|
||||
}
|
||||
|
||||
if results.Summary == healStoppedStatus {
|
||||
t.Errorf("heal sequence stopped unexpectedly")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -61,9 +62,8 @@ var (
|
||||
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
|
||||
errHealStopSignalled = fmt.Errorf("heal stop signaled")
|
||||
|
||||
errFnHealFromAPIErr = func(err error) error {
|
||||
errCode := toAPIErrorCode(err)
|
||||
apiErr := getAPIError(errCode)
|
||||
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
|
||||
apiErr := toAPIError(ctx, err)
|
||||
return fmt.Errorf("Heal internal error: %s: %s",
|
||||
apiErr.Code, apiErr.Description)
|
||||
}
|
||||
@@ -112,6 +112,32 @@ func initAllHealState(isErasureMode bool) {
|
||||
globalAllHealState = allHealState{
|
||||
healSeqMap: make(map[string]*healSequence),
|
||||
}
|
||||
|
||||
go globalAllHealState.periodicHealSeqsClean()
|
||||
}
|
||||
|
||||
func (ahs *allHealState) periodicHealSeqsClean() {
|
||||
// Launch clean-up routine to remove this heal sequence (after
|
||||
// it ends) from the global state after timeout has elapsed.
|
||||
ticker := time.NewTicker(time.Minute * 5)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
now := UTCNow()
|
||||
ahs.Lock()
|
||||
for path, h := range ahs.healSeqMap {
|
||||
if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
|
||||
delete(ahs.healSeqMap, path)
|
||||
}
|
||||
}
|
||||
ahs.Unlock()
|
||||
case <-GlobalServiceDoneCh:
|
||||
// server could be restarting - need
|
||||
// to exit immediately
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getHealSequence - Retrieve a heal sequence by path. The second
|
||||
@@ -123,6 +149,35 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
|
||||
return h, exists
|
||||
}
|
||||
|
||||
func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
|
||||
var hsp madmin.HealStopSuccess
|
||||
he, exists := ahs.getHealSequence(path)
|
||||
if !exists {
|
||||
hsp = madmin.HealStopSuccess{
|
||||
ClientToken: "invalid",
|
||||
StartTime: UTCNow(),
|
||||
}
|
||||
} else {
|
||||
hsp = madmin.HealStopSuccess{
|
||||
ClientToken: he.clientToken,
|
||||
ClientAddress: he.clientAddress,
|
||||
StartTime: he.startTime,
|
||||
}
|
||||
|
||||
he.stop()
|
||||
for !he.hasEnded() {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
// Heal sequence explicitly stopped, remove it.
|
||||
delete(ahs.healSeqMap, path)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(&hsp)
|
||||
return b, toAdminAPIErr(context.Background(), err)
|
||||
}
|
||||
|
||||
// LaunchNewHealSequence - launches a background routine that performs
|
||||
// healing according to the healSequence argument. For each heal
|
||||
// sequence, state is stored in the `globalAllHealState`, which is a
|
||||
@@ -134,7 +189,7 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
|
||||
// background routine to clean up heal results after the
|
||||
// aforementioned duration.
|
||||
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
respBytes []byte, errCode APIErrorCode, errMsg string) {
|
||||
respBytes []byte, apiErr APIError, errMsg string) {
|
||||
|
||||
existsAndLive := false
|
||||
he, exists := ahs.getHealSequence(h.path)
|
||||
@@ -143,22 +198,21 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
existsAndLive = true
|
||||
}
|
||||
}
|
||||
|
||||
if existsAndLive {
|
||||
// A heal sequence exists on the given path.
|
||||
if h.forceStarted {
|
||||
// stop the running heal sequence - wait for
|
||||
// it to finish.
|
||||
// stop the running heal sequence - wait for it to finish.
|
||||
he.stop()
|
||||
for !he.hasEnded() {
|
||||
time.Sleep(10 * time.Second)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
} else {
|
||||
errMsg = "Heal is already running on the given path " +
|
||||
"(use force-start option to stop and start afresh). " +
|
||||
fmt.Sprintf("The heal was started by IP %s at %s",
|
||||
h.clientAddress, h.startTime)
|
||||
|
||||
return nil, ErrHealAlreadyRunning, errMsg
|
||||
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
|
||||
h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
|
||||
return nil, errorCodes.ToAPIErr(ErrHealAlreadyRunning), errMsg
|
||||
}
|
||||
}
|
||||
|
||||
@@ -173,7 +227,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
|
||||
errMsg = "The provided heal sequence path overlaps with an existing " +
|
||||
fmt.Sprintf("heal path: %s", k)
|
||||
return nil, ErrHealOverlappingPaths, errMsg
|
||||
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,51 +237,16 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
// Launch top-level background heal go-routine
|
||||
go h.healSequenceStart()
|
||||
|
||||
// Launch clean-up routine to remove this heal sequence (after
|
||||
// it ends) from the global state after timeout has elapsed.
|
||||
go func() {
|
||||
var keepStateTimeout <-chan time.Time
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
defer ticker.Stop()
|
||||
everyMinute := ticker.C
|
||||
for {
|
||||
select {
|
||||
// Check every minute if heal sequence has ended.
|
||||
case <-everyMinute:
|
||||
if h.hasEnded() {
|
||||
keepStateTimeout = time.After(keepHealSeqStateDuration)
|
||||
everyMinute = nil
|
||||
}
|
||||
|
||||
// This case does not fire until the heal
|
||||
// sequence completes.
|
||||
case <-keepStateTimeout:
|
||||
// Heal sequence has ended, keep
|
||||
// results state duration has elapsed,
|
||||
// so purge state.
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
delete(ahs.healSeqMap, h.path)
|
||||
return
|
||||
|
||||
case <-globalServiceDoneCh:
|
||||
// server could be restarting - need
|
||||
// to exit immediately
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
b, err := json.Marshal(madmin.HealStartSuccess{
|
||||
ClientToken: h.clientToken,
|
||||
ClientAddress: h.clientAddress,
|
||||
StartTime: h.startTime,
|
||||
})
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return nil, ErrInternalError, ""
|
||||
logger.LogIf(h.ctx, err)
|
||||
return nil, toAPIError(h.ctx, err), ""
|
||||
}
|
||||
return b, ErrNone, ""
|
||||
return b, noError, ""
|
||||
}
|
||||
|
||||
// PopHealStatusJSON - Called by heal-status API. It fetches the heal
|
||||
@@ -272,7 +291,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
|
||||
|
||||
jbytes, err := json.Marshal(h.currentStatus)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
logger.LogIf(h.ctx, err)
|
||||
return nil, ErrInternalError
|
||||
}
|
||||
|
||||
@@ -285,12 +304,15 @@ type healSequence struct {
// bucket, and prefix on which heal seq. was initiated
bucket, objPrefix string

// path is just bucket + "/" + objPrefix
// path is just pathJoin(bucket, objPrefix)
path string

// time at which heal sequence was started
startTime time.Time

// time at which heal sequence has ended
endTime time.Time

// Heal client info
clientToken, clientAddress string

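The path field now comes from pathJoin rather than naive concatenation, which avoids doubled or dangling slashes when objPrefix is empty or already starts with a slash. A quick illustration using the standard library's path.Join as a stand-in (MinIO's own pathJoin may differ in details such as trailing-slash handling):

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	bucket := "mybucket"

	// Naive concatenation leaves a trailing slash for an empty prefix and
	// doubles slashes for prefixes that already start with one.
	fmt.Println(bucket + "/" + "")          // "mybucket/"
	fmt.Println(bucket + "/" + "/dir/obj")  // "mybucket//dir/obj"

	// Joining normalizes both cases.
	fmt.Println(path.Join(bucket, ""))         // "mybucket"
	fmt.Println(path.Join(bucket, "/dir/obj")) // "mybucket/dir/obj"
}
```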
@@ -330,7 +352,7 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
|
||||
return &healSequence{
|
||||
bucket: bucket,
|
||||
objPrefix: objPrefix,
|
||||
path: bucket + "/" + objPrefix,
|
||||
path: pathJoin(bucket, objPrefix),
|
||||
startTime: UTCNow(),
|
||||
clientToken: mustGetUUID(),
|
||||
clientAddress: clientAddr,
|
||||
@@ -385,7 +407,6 @@ func (h *healSequence) stop() {
|
||||
// sequence automatically resumes. The return value indicates if the
|
||||
// operation succeeded.
|
||||
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
|
||||
|
||||
// start a timer to keep an upper time limit to find an empty
|
||||
// slot to add the given heal result - if no slot is found it
|
||||
// means that the server is holding the maximum amount of
|
||||
@@ -469,6 +490,7 @@ func (h *healSequence) healSequenceStart() {
|
||||
|
||||
select {
|
||||
case err, ok := <-h.traverseAndHealDoneCh:
|
||||
h.endTime = UTCNow()
|
||||
h.currentStatus.updateLock.Lock()
|
||||
defer h.currentStatus.updateLock.Unlock()
|
||||
// Heal traversal is complete.
|
||||
@@ -482,6 +504,7 @@ func (h *healSequence) healSequenceStart() {
|
||||
}
|
||||
|
||||
case <-h.stopSignalCh:
|
||||
h.endTime = UTCNow()
|
||||
h.currentStatus.updateLock.Lock()
|
||||
h.currentStatus.Summary = healStoppedStatus
|
||||
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
|
||||
@@ -521,6 +544,12 @@ func (h *healSequence) traverseAndHeal() {
|
||||
// Start with format healing
|
||||
checkErr(h.healDiskFormat)
|
||||
|
||||
// Start healing the config prefix.
|
||||
checkErr(h.healMinioSysMeta(minioConfigPrefix))
|
||||
|
||||
// Start healing the bucket config prefix.
|
||||
checkErr(h.healMinioSysMeta(bucketConfigPrefix))
|
||||
|
||||
// Heal buckets and objects
|
||||
checkErr(h.healBuckets)
|
||||
|
||||
@@ -531,9 +560,74 @@ func (h *healSequence) traverseAndHeal() {
|
||||
close(h.traverseAndHealDoneCh)
|
||||
}
|
||||
|
||||
// healMinioSysMeta - heals all files under a given meta prefix, returns a function
|
||||
// which in-turn heals the respective meta directory path and any files in int.
|
||||
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
|
||||
return func() error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
// NOTE: Healing on meta is run regardless
|
||||
// of any bucket being selected, this is to ensure that
|
||||
// meta are always upto date and correct.
|
||||
marker := ""
|
||||
isTruncated := true
|
||||
for isTruncated {
|
||||
if globalHTTPServer != nil {
|
||||
// Wait at max 1 minute for an inprogress request
|
||||
// before proceeding to heal
|
||||
waitCount := 60
|
||||
// Any requests in progress, delay the heal.
|
||||
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
|
||||
waitCount--
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Lists all objects under `config` prefix.
|
||||
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, minioMetaBucket, metaPrefix,
|
||||
marker, "", 1000)
|
||||
if err != nil {
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
for index := range objectInfos.Objects {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
o := objectInfos.Objects[index]
|
||||
res, herr := objectAPI.HealObject(h.ctx, o.Bucket, o.Name, h.settings.DryRun, h.settings.Remove)
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted we ignore this file an move on.
|
||||
if isErrObjectNotFound(herr) {
|
||||
continue
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
res.Type = madmin.HealItemBucketMetadata
|
||||
if err = h.pushHealResultItem(res); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
isTruncated = objectInfos.IsTruncated
|
||||
marker = objectInfos.NextMarker
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// healDiskFormat - heals format.json, return value indicates if a
|
||||
// failure error occurred.
|
||||
func (h *healSequence) healDiskFormat() error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
@@ -544,13 +638,18 @@ func (h *healSequence) healDiskFormat() error {
|
||||
// return any error, ignore error returned when disks have
|
||||
// already healed.
|
||||
if err != nil && err != errNoHealRequired {
|
||||
return errFnHealFromAPIErr(err)
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
// Healing succeeded notify the peers to reload format and re-initialize disks.
|
||||
// We will not notify peers only if healing succeeded.
|
||||
if err == nil {
|
||||
peersReInitFormat(globalAdminPeers, h.settings.DryRun)
|
||||
for _, nerr := range globalNotificationSys.ReloadFormat(h.settings.DryRun) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(h.ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(h.ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push format heal result
|
||||
@@ -559,6 +658,10 @@ func (h *healSequence) healDiskFormat() error {
|
||||
|
||||
// healBuckets - check for all buckets heal or just particular bucket.
|
||||
func (h *healSequence) healBuckets() error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// 1. If a bucket was specified, heal only the bucket.
|
||||
if h.bucket != "" {
|
||||
return h.healBucket(h.bucket)
|
||||
@@ -572,7 +675,7 @@ func (h *healSequence) healBuckets() error {
|
||||
|
||||
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
|
||||
if err != nil {
|
||||
return errFnHealFromAPIErr(err)
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
@@ -586,36 +689,29 @@ func (h *healSequence) healBuckets() error {
|
||||
|
||||
// healBucket - traverses and heals given bucket
|
||||
func (h *healSequence) healBucket(bucket string) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun)
|
||||
// push any available results before checking for error
|
||||
for _, result := range results {
|
||||
if perr := h.pushHealResultItem(result); perr != nil {
|
||||
return perr
|
||||
}
|
||||
}
|
||||
result, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun, h.settings.Remove)
|
||||
// handle heal-bucket error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = h.pushHealResultItem(result); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !h.settings.Recursive {
|
||||
if h.objPrefix != "" {
|
||||
// Check if an object named as the objPrefix exists,
|
||||
// and if so heal it.
|
||||
_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
|
||||
if err == nil {
|
||||
err = h.healObject(bucket, h.objPrefix)
|
||||
if err != nil {
|
||||
if err = h.healObject(bucket, h.objPrefix); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -624,7 +720,7 @@ func (h *healSequence) healBucket(bucket string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := runtime.NumCPU() * globalEndpoints.Nodes()
|
||||
entries := runtime.NumCPU()
|
||||
|
||||
marker := ""
|
||||
isTruncated := true
|
||||
@@ -634,7 +730,7 @@ func (h *healSequence) healBucket(bucket string) error {
|
||||
// before proceeding to heal
|
||||
waitCount := 60
|
||||
// Any requests in progress, delay the heal.
|
||||
for globalHTTPServer.GetRequestCount() > 0 && waitCount > 0 {
|
||||
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
|
||||
waitCount--
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
@@ -644,7 +740,7 @@ func (h *healSequence) healBucket(bucket string) error {
|
||||
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
|
||||
h.objPrefix, marker, "", entries)
|
||||
if err != nil {
|
||||
return errFnHealFromAPIErr(err)
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
g := errgroup.WithNErrs(len(objectInfos.Objects))
|
||||
@@ -680,7 +776,10 @@ func (h *healSequence) healObject(bucket, object string) error {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun)
|
||||
hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove)
|
||||
if isErrObjectNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
hri.Detail = err.Error()
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018, 2019 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -31,7 +31,7 @@ type adminAPIHandlers struct {
|
||||
}
|
||||
|
||||
// registerAdminRouter - Add handler functions for each service REST API routes.
|
||||
func registerAdminRouter(router *mux.Router) {
|
||||
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
|
||||
|
||||
adminAPI := adminAPIHandlers{}
|
||||
// Admin router
|
||||
@@ -53,28 +53,71 @@ func registerAdminRouter(router *mux.Router) {
|
||||
// Info operations
|
||||
adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
|
||||
|
||||
/// Heal operations
|
||||
if globalIsDistXL || globalIsXL {
|
||||
/// Heal operations
|
||||
|
||||
// Heal processing endpoint.
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
// Heal processing endpoint.
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
|
||||
/// Health operations
|
||||
|
||||
}
|
||||
// Performance command - return performance details based on input type
|
||||
adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
|
||||
|
||||
// Profiling operations
|
||||
adminV1Router.Methods(http.MethodPost).Path("/profiling/start/{profiler}").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
|
||||
Queries("profilerType", "{profilerType:.*}")
|
||||
adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
|
||||
|
||||
/// Config operations
|
||||
if enableConfigOps {
|
||||
// Update credentials
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateAdminCredentialsHandler))
|
||||
// Get config
|
||||
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
|
||||
// Set config
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
|
||||
|
||||
// Update credentials
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateCredentialsHandler))
|
||||
// Get config
|
||||
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
|
||||
// Set config
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
|
||||
// Get config keys/values
|
||||
adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
|
||||
// Set config keys/values
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
|
||||
}
|
||||
|
||||
// Get config keys/values
|
||||
adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
|
||||
// Set config keys/values
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
|
||||
if enableIAMOps {
|
||||
// -- IAM APIs --
|
||||
|
||||
// Add policy IAM
|
||||
adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
|
||||
"{name:.*}")
|
||||
|
||||
// Add user IAM
|
||||
adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
|
||||
Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
|
||||
adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
|
||||
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// Remove policy IAM
|
||||
adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Remove user IAM
|
||||
adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// List policies
|
||||
adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// -- Top APIs --
// Top locks
adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))

// If none of the routes match, return error.
adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
}

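registerAdminRouter now takes enableConfigOps and enableIAMOps so deployments (for example gateways) can opt out of config and IAM routes. The standalone sketch below mirrors that flag-guarded registration pattern with plain gorilla/mux; the paths and handlers are simplified stand-ins, not MinIO's actual admin handlers:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	enableConfigOps := true // analogous to the flag passed to registerAdminRouter

	router := mux.NewRouter()
	adminV1 := router.PathPrefix("/minio/admin/v1").Subrouter()

	// Always-on route, like /info in the real router.
	adminV1.Methods(http.MethodGet).Path("/info").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"mode":"online"}`))
	})

	// Config routes are registered only when the deployment supports them,
	// mirroring the enableConfigOps guard in registerAdminRouter.
	if enableConfigOps {
		adminV1.Methods(http.MethodGet).Path("/config").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte(`{}`))
		})
	}

	// Unmatched admin paths get an explicit JSON 404, like NotFoundHandler above.
	adminV1.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, `{"error":"not found"}`, http.StatusNotFound)
	})

	log.Fatal(http.ListenAndServe(":9000", router))
}
```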
@@ -1,295 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")
|
||||
|
||||
// AdminRPCClient - admin RPC client talks to admin RPC server.
|
||||
type AdminRPCClient struct {
|
||||
*RPCClient
|
||||
}
|
||||
|
||||
// SignalService - calls SignalService RPC.
|
||||
func (rpcClient *AdminRPCClient) SignalService(signal serviceSignal) (err error) {
|
||||
args := SignalServiceArgs{Sig: signal}
|
||||
reply := VoidReply{}
|
||||
|
||||
return rpcClient.Call(adminServiceName+".SignalService", &args, &reply)
|
||||
}
|
||||
|
||||
// ReInitFormat - re-initialize disk format, remotely.
|
||||
func (rpcClient *AdminRPCClient) ReInitFormat(dryRun bool) error {
|
||||
args := ReInitFormatArgs{DryRun: dryRun}
|
||||
reply := VoidReply{}
|
||||
|
||||
return rpcClient.Call(adminServiceName+".ReInitFormat", &args, &reply)
|
||||
}
|
||||
|
||||
// ServerInfo - returns the server info of the server to which the RPC call is made.
|
||||
func (rpcClient *AdminRPCClient) ServerInfo() (sid ServerInfoData, err error) {
|
||||
err = rpcClient.Call(adminServiceName+".ServerInfo", &AuthArgs{}, &sid)
|
||||
return sid, err
|
||||
}
|
||||
|
||||
// GetConfig - returns config.json of the remote server.
|
||||
func (rpcClient *AdminRPCClient) GetConfig() ([]byte, error) {
|
||||
args := AuthArgs{}
|
||||
var reply []byte
|
||||
|
||||
err := rpcClient.Call(adminServiceName+".GetConfig", &args, &reply)
|
||||
return reply, err
|
||||
}
|
||||
|
||||
// StartProfiling - starts profiling in the remote server.
|
||||
func (rpcClient *AdminRPCClient) StartProfiling(profiler string) error {
|
||||
args := StartProfilingArgs{Profiler: profiler}
|
||||
reply := VoidReply{}
|
||||
return rpcClient.Call(adminServiceName+".StartProfiling", &args, &reply)
|
||||
}
|
||||
|
||||
// DownloadProfilingData - returns profiling data of the remote server.
|
||||
func (rpcClient *AdminRPCClient) DownloadProfilingData() ([]byte, error) {
|
||||
args := AuthArgs{}
|
||||
var reply []byte
|
||||
|
||||
err := rpcClient.Call(adminServiceName+".DownloadProfilingData", &args, &reply)
|
||||
return reply, err
|
||||
}
|
||||
|
||||
// NewAdminRPCClient - returns new admin RPC client.
|
||||
func NewAdminRPCClient(host *xnet.Host) (*AdminRPCClient, error) {
|
||||
scheme := "http"
|
||||
if globalIsSSL {
|
||||
scheme = "https"
|
||||
}
|
||||
|
||||
serviceURL := &xnet.URL{
|
||||
Scheme: scheme,
|
||||
Host: host.String(),
|
||||
Path: adminServicePath,
|
||||
}
|
||||
|
||||
var tlsConfig *tls.Config
|
||||
if globalIsSSL {
|
||||
tlsConfig = &tls.Config{
|
||||
ServerName: host.Name,
|
||||
RootCAs: globalRootCAs,
|
||||
}
|
||||
}
|
||||
|
||||
rpcClient, err := NewRPCClient(
|
||||
RPCClientArgs{
|
||||
NewAuthTokenFunc: newAuthToken,
|
||||
RPCVersion: globalRPCAPIVersion,
|
||||
ServiceName: adminServiceName,
|
||||
ServiceURL: serviceURL,
|
||||
TLSConfig: tlsConfig,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &AdminRPCClient{rpcClient}, nil
|
||||
}
|
||||
|
||||
// adminCmdRunner - abstracts local and remote execution of admin
|
||||
// commands like service stop and service restart.
|
||||
type adminCmdRunner interface {
|
||||
SignalService(s serviceSignal) error
|
||||
ReInitFormat(dryRun bool) error
|
||||
ServerInfo() (ServerInfoData, error)
|
||||
GetConfig() ([]byte, error)
|
||||
StartProfiling(string) error
|
||||
DownloadProfilingData() ([]byte, error)
|
||||
}
|
||||
|
||||
// adminPeer - represents an entity that implements admin API RPCs.
|
||||
type adminPeer struct {
|
||||
addr string
|
||||
cmdRunner adminCmdRunner
|
||||
isLocal bool
|
||||
}
|
||||
|
||||
// type alias for a collection of adminPeer.
|
||||
type adminPeers []adminPeer
|
||||
|
||||
// makeAdminPeers - helper function to construct a collection of adminPeer.
|
||||
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
|
||||
localAddr := GetLocalPeer(endpoints)
|
||||
if strings.HasPrefix(localAddr, "127.0.0.1:") {
|
||||
// Use first IPv4 instead of loopback address.
|
||||
localAddr = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
|
||||
}
|
||||
adminPeerList = append(adminPeerList, adminPeer{
|
||||
addr: localAddr,
|
||||
cmdRunner: localAdminClient{},
|
||||
isLocal: true,
|
||||
})
|
||||
|
||||
for _, hostStr := range GetRemotePeers(endpoints) {
|
||||
host, err := xnet.ParseHost(hostStr)
|
||||
logger.FatalIf(err, "Unable to parse Admin RPC Host")
|
||||
rpcClient, err := NewAdminRPCClient(host)
|
||||
logger.FatalIf(err, "Unable to initialize Admin RPC Client")
|
||||
adminPeerList = append(adminPeerList, adminPeer{
|
||||
addr: hostStr,
|
||||
cmdRunner: rpcClient,
|
||||
})
|
||||
}
|
||||
|
||||
return adminPeerList
|
||||
}
|
||||
|
||||
// peersReInitFormat - reinitialize remote object layers to new format.
|
||||
func peersReInitFormat(peers adminPeers, dryRun bool) error {
|
||||
errs := make([]error, len(peers))
|
||||
|
||||
// Send ReInitFormat RPC call to all nodes.
|
||||
// for local adminPeer this is a no-op.
|
||||
wg := sync.WaitGroup{}
|
||||
for i, peer := range peers {
|
||||
wg.Add(1)
|
||||
go func(idx int, peer adminPeer) {
|
||||
defer wg.Done()
|
||||
if !peer.isLocal {
|
||||
errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
|
||||
}
|
||||
}(i, peer)
|
||||
}
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initialize global adminPeer collection.
|
||||
func initGlobalAdminPeers(endpoints EndpointList) {
|
||||
globalAdminPeers = makeAdminPeers(endpoints)
|
||||
}
|
||||
|
||||
// invokeServiceCmd - Invoke Restart/Stop command.
|
||||
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
|
||||
switch cmd {
|
||||
case serviceRestart, serviceStop:
|
||||
err = cp.cmdRunner.SignalService(cmd)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// sendServiceCmd - Invoke Restart command on remote peers
|
||||
// adminPeer followed by on the local peer.
|
||||
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
|
||||
// Send service command like stop or restart to all remote nodes and finally run on local node.
|
||||
errs := make([]error, len(cps))
|
||||
var wg sync.WaitGroup
|
||||
remotePeers := cps[1:]
|
||||
for i := range remotePeers {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
// we use idx+1 because remotePeers slice is 1 position shifted w.r.t cps
|
||||
errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
errs[0] = invokeServiceCmd(cps[0], cmd)
|
||||
}
|
||||
|
||||
// uptimeSlice - used to sort uptimes in chronological order.
|
||||
type uptimeSlice []struct {
|
||||
err error
|
||||
uptime time.Duration
|
||||
}
|
||||
|
||||
func (ts uptimeSlice) Len() int {
|
||||
return len(ts)
|
||||
}
|
||||
|
||||
func (ts uptimeSlice) Less(i, j int) bool {
|
||||
return ts[i].uptime < ts[j].uptime
|
||||
}
|
||||
|
||||
func (ts uptimeSlice) Swap(i, j int) {
|
||||
ts[i], ts[j] = ts[j], ts[i]
|
||||
}
|
||||
|
||||
// getPeerUptimes - returns the uptime since the last time read quorum
|
||||
// was established on success. Otherwise returns errXLReadQuorum.
|
||||
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
|
||||
// In a single node Erasure or FS backend setup the uptime of
|
||||
// the setup is the uptime of the single minio server
|
||||
// instance.
|
||||
if !globalIsDistXL {
|
||||
return UTCNow().Sub(globalBootTime), nil
|
||||
}
|
||||
|
||||
uptimes := make(uptimeSlice, len(peers))
|
||||
|
||||
// Get up time of all servers.
|
||||
wg := sync.WaitGroup{}
|
||||
for i, peer := range peers {
|
||||
wg.Add(1)
|
||||
go func(idx int, peer adminPeer) {
|
||||
defer wg.Done()
|
||||
serverInfoData, rpcErr := peer.cmdRunner.ServerInfo()
|
||||
uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
|
||||
}(i, peer)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Sort uptimes in chronological order.
|
||||
sort.Sort(uptimes)
|
||||
|
||||
// Pick the readQuorum'th uptime in chronological order. i.e,
|
||||
// the time at which read quorum was (re-)established.
|
||||
readQuorum := len(uptimes) / 2
|
||||
validCount := 0
|
||||
latestUptime := time.Duration(0)
|
||||
for _, uptime := range uptimes {
|
||||
if uptime.err != nil {
|
||||
logger.LogIf(context.Background(), uptime.err)
|
||||
continue
|
||||
}
|
||||
|
||||
validCount++
|
||||
if validCount >= readQuorum {
|
||||
latestUptime = uptime.uptime
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Less than readQuorum "Admin.Uptime" RPC call returned
|
||||
// successfully, so read-quorum unavailable.
|
||||
if validCount < readQuorum {
|
||||
return time.Duration(0), InsufficientReadQuorum{}
|
||||
}
|
||||
|
||||
return latestUptime, nil
|
||||
}
|
||||
@@ -1,103 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"path"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
xrpc "github.com/minio/minio/cmd/rpc"
|
||||
)
|
||||
|
||||
const adminServiceName = "Admin"
|
||||
const adminServiceSubPath = "/admin"
|
||||
|
||||
var adminServicePath = path.Join(minioReservedBucketPath, adminServiceSubPath)
|
||||
|
||||
// adminRPCReceiver - Admin RPC receiver for admin RPC server.
|
||||
type adminRPCReceiver struct {
|
||||
local *localAdminClient
|
||||
}
|
||||
|
||||
// SignalServiceArgs - provides the signal argument to SignalService RPC
|
||||
type SignalServiceArgs struct {
|
||||
AuthArgs
|
||||
Sig serviceSignal
|
||||
}
|
||||
|
||||
// SignalService - Send a restart or stop signal to the service
|
||||
func (receiver *adminRPCReceiver) SignalService(args *SignalServiceArgs, reply *VoidReply) error {
|
||||
return receiver.local.SignalService(args.Sig)
|
||||
}
|
||||
|
||||
// ServerInfo - returns the server info when object layer was initialized on this server.
|
||||
func (receiver *adminRPCReceiver) ServerInfo(args *AuthArgs, reply *ServerInfoData) (err error) {
|
||||
*reply, err = receiver.local.ServerInfo()
|
||||
return err
|
||||
}
|
||||
|
||||
// StartProfilingArgs - holds the RPC argument for StartingProfiling RPC call
|
||||
type StartProfilingArgs struct {
|
||||
AuthArgs
|
||||
Profiler string
|
||||
}
|
||||
|
||||
// StartProfiling - starts profiling of this server
|
||||
func (receiver *adminRPCReceiver) StartProfiling(args *StartProfilingArgs, reply *VoidReply) error {
|
||||
return receiver.local.StartProfiling(args.Profiler)
|
||||
}
|
||||
|
||||
// DownloadProfilingData - stops and returns profiling data of this server
|
||||
func (receiver *adminRPCReceiver) DownloadProfilingData(args *AuthArgs, reply *[]byte) (err error) {
|
||||
*reply, err = receiver.local.DownloadProfilingData()
|
||||
return
|
||||
}
|
||||
|
||||
// GetConfig - returns the config.json of this server.
|
||||
func (receiver *adminRPCReceiver) GetConfig(args *AuthArgs, reply *[]byte) (err error) {
|
||||
*reply, err = receiver.local.GetConfig()
|
||||
return err
|
||||
}
|
||||
|
||||
// ReInitFormatArgs - provides dry-run information to re-initialize format.json
|
||||
type ReInitFormatArgs struct {
|
||||
AuthArgs
|
||||
DryRun bool
|
||||
}
|
||||
|
||||
// ReInitFormat - re-init 'format.json'
|
||||
func (receiver *adminRPCReceiver) ReInitFormat(args *ReInitFormatArgs, reply *VoidReply) error {
|
||||
return receiver.local.ReInitFormat(args.DryRun)
|
||||
}
|
||||
|
||||
// NewAdminRPCServer - returns new admin RPC server.
|
||||
func NewAdminRPCServer() (*xrpc.Server, error) {
|
||||
rpcServer := xrpc.NewServer()
|
||||
if err := rpcServer.RegisterName(adminServiceName, &adminRPCReceiver{&localAdminClient{}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rpcServer, nil
|
||||
}
|
||||
|
||||
// registerAdminRPCRouter - creates and registers Admin RPC server and its router.
|
||||
func registerAdminRPCRouter(router *mux.Router) {
|
||||
rpcServer, err := NewAdminRPCServer()
|
||||
logger.FatalIf(err, "Unable to initialize Lock RPC Server")
|
||||
subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
|
||||
subrouter.Path(adminServiceSubPath).HandlerFunc(httpTraceHdrs(rpcServer.ServeHTTP))
|
||||
}
|
||||
@@ -1,239 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// localAdminClient and AdminRPCClient are adminCmdRunner interface compatible,
|
||||
// hence below test functions are available for both clients.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Admin RPC server, adminRPCReceiver and AdminRPCClient are
|
||||
// inter-dependent, below test functions are sufficient to test all of them.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func testAdminCmdRunnerSignalService(t *testing.T, client adminCmdRunner) {
|
||||
tmpGlobalServiceSignalCh := globalServiceSignalCh
|
||||
globalServiceSignalCh = make(chan serviceSignal, 10)
|
||||
defer func() {
|
||||
globalServiceSignalCh = tmpGlobalServiceSignalCh
|
||||
}()
|
||||
|
||||
testCases := []struct {
|
||||
signal serviceSignal
|
||||
expectErr bool
|
||||
}{
|
||||
{serviceRestart, false},
|
||||
{serviceStop, false},
|
||||
{serviceStatus, true},
|
||||
{serviceSignal(100), true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
err := client.SignalService(testCase.signal)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testAdminCmdRunnerReInitFormat(t *testing.T, client adminCmdRunner) {
|
||||
tmpGlobalObjectAPI := globalObjectAPI
|
||||
defer func() {
|
||||
globalObjectAPI = tmpGlobalObjectAPI
|
||||
}()
|
||||
|
||||
testCases := []struct {
|
||||
objectAPI ObjectLayer
|
||||
dryRun bool
|
||||
expectErr bool
|
||||
}{
|
||||
{&DummyObjectLayer{}, true, false},
|
||||
{&DummyObjectLayer{}, false, false},
|
||||
{nil, true, true},
|
||||
{nil, false, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
globalObjectAPI = testCase.objectAPI
|
||||
err := client.ReInitFormat(testCase.dryRun)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testAdminCmdRunnerServerInfo(t *testing.T, client adminCmdRunner) {
|
||||
tmpGlobalBootTime := globalBootTime
|
||||
tmpGlobalObjectAPI := globalObjectAPI
|
||||
tmpGlobalConnStats := globalConnStats
|
||||
tmpGlobalHTTPStats := globalHTTPStats
|
||||
tmpGlobalNotificationSys := globalNotificationSys
|
||||
defer func() {
|
||||
globalBootTime = tmpGlobalBootTime
|
||||
globalObjectAPI = tmpGlobalObjectAPI
|
||||
globalConnStats = tmpGlobalConnStats
|
||||
globalHTTPStats = tmpGlobalHTTPStats
|
||||
globalNotificationSys = tmpGlobalNotificationSys
|
||||
}()
|
||||
|
||||
endpoints := new(EndpointList)
|
||||
|
||||
notificationSys := NewNotificationSys(globalServerConfig, *endpoints)
|
||||
|
||||
testCases := []struct {
|
||||
bootTime time.Time
|
||||
objectAPI ObjectLayer
|
||||
connStats *ConnStats
|
||||
httpStats *HTTPStats
|
||||
notificationSys *NotificationSys
|
||||
expectErr bool
|
||||
}{
|
||||
{UTCNow(), &DummyObjectLayer{}, newConnStats(), newHTTPStats(), notificationSys, false},
|
||||
{time.Time{}, nil, nil, nil, nil, true},
|
||||
{UTCNow(), nil, nil, nil, nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
globalBootTime = testCase.bootTime
|
||||
globalObjectAPI = testCase.objectAPI
|
||||
globalConnStats = testCase.connStats
|
||||
globalHTTPStats = testCase.httpStats
|
||||
globalNotificationSys = testCase.notificationSys
|
||||
_, err := client.ServerInfo()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testAdminCmdRunnerGetConfig(t *testing.T, client adminCmdRunner) {
|
||||
tmpGlobalServerConfig := globalServerConfig
|
||||
defer func() {
|
||||
globalServerConfig = tmpGlobalServerConfig
|
||||
}()
|
||||
|
||||
config := newServerConfig()
|
||||
|
||||
testCases := []struct {
|
||||
config *serverConfig
|
||||
expectErr bool
|
||||
}{
|
||||
{globalServerConfig, false},
|
||||
{config, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
globalServerConfig = testCase.config
|
||||
_, err := client.GetConfig()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newAdminRPCHTTPServerClient(t *testing.T) (*httptest.Server, *AdminRPCClient, *serverConfig) {
|
||||
rpcServer, err := NewAdminRPCServer()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
|
||||
httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
rpcServer.ServeHTTP(w, r)
|
||||
}))
|
||||
|
||||
url, err := xnet.ParseURL(httpServer.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
|
||||
host, err := xnet.ParseHost(url.Host)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
|
||||
prevGlobalServerConfig := globalServerConfig
|
||||
globalServerConfig = newServerConfig()
|
||||
|
||||
rpcClient, err := NewAdminRPCClient(host)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
|
||||
return httpServer, rpcClient, prevGlobalServerConfig
|
||||
}
|
||||
|
||||
func TestAdminRPCClientSignalService(t *testing.T) {
|
||||
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
|
||||
defer httpServer.Close()
|
||||
defer func() {
|
||||
globalServerConfig = prevGlobalServerConfig
|
||||
}()
|
||||
|
||||
testAdminCmdRunnerSignalService(t, rpcClient)
|
||||
}
|
||||
|
||||
func TestAdminRPCClientReInitFormat(t *testing.T) {
|
||||
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
|
||||
defer httpServer.Close()
|
||||
defer func() {
|
||||
globalServerConfig = prevGlobalServerConfig
|
||||
}()
|
||||
|
||||
testAdminCmdRunnerReInitFormat(t, rpcClient)
|
||||
}
|
||||
|
||||
func TestAdminRPCClientServerInfo(t *testing.T) {
|
||||
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
|
||||
defer httpServer.Close()
|
||||
defer func() {
|
||||
globalServerConfig = prevGlobalServerConfig
|
||||
}()
|
||||
|
||||
testAdminCmdRunnerServerInfo(t, rpcClient)
|
||||
}
|
||||
|
||||
func TestAdminRPCClientGetConfig(t *testing.T) {
|
||||
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
|
||||
defer httpServer.Close()
|
||||
defer func() {
|
||||
globalServerConfig = prevGlobalServerConfig
|
||||
}()
|
||||
|
||||
testAdminCmdRunnerGetConfig(t, rpcClient)
|
||||
}
|
||||
@@ -23,92 +23,10 @@ import (
|
||||
const (
|
||||
// Response request id.
|
||||
responseRequestIDKey = "x-amz-request-id"
|
||||
// Deployment id.
|
||||
responseDeploymentIDKey = "x-minio-deployment-id"
|
||||
)
|
||||
|
||||
// CSVFileHeaderInfo - Can be either USE, IGNORE or NONE, defines what to do with
|
||||
// the first row
|
||||
type CSVFileHeaderInfo string
|
||||
|
||||
// Constants for file header info.
|
||||
const (
|
||||
CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
|
||||
CSVFileHeaderInfoIgnore = "IGNORE"
|
||||
CSVFileHeaderInfoUse = "USE"
|
||||
)
|
||||
|
||||
// SelectCompressionType - ONLY GZIP is supported
|
||||
type SelectCompressionType string
|
||||
|
||||
// Constants for compression types under select API.
|
||||
const (
|
||||
SelectCompressionNONE SelectCompressionType = "NONE"
|
||||
SelectCompressionGZIP = "GZIP"
|
||||
SelectCompressionBZIP = "BZIP2"
|
||||
)
|
||||
|
||||
// CSVQuoteFields - Can be either Always or AsNeeded
|
||||
type CSVQuoteFields string
|
||||
|
||||
// Constants for csv quote styles.
|
||||
const (
|
||||
CSVQuoteFieldsAlways CSVQuoteFields = "Always"
|
||||
CSVQuoteFieldsAsNeeded = "AsNeeded"
|
||||
)
|
||||
|
||||
// QueryExpressionType - Currently can only be SQL
|
||||
type QueryExpressionType string
|
||||
|
||||
// Constants for expression type.
|
||||
const (
|
||||
QueryExpressionTypeSQL QueryExpressionType = "SQL"
|
||||
)
|
||||
|
||||
// JSONType determines json input serialization type.
|
||||
type JSONType string
|
||||
|
||||
// Constants for JSONTypes.
|
||||
const (
|
||||
JSONDocumentType JSONType = "Document"
|
||||
JSONLinesType = "Lines"
|
||||
)
|
||||
|
||||
// ObjectSelectRequest - represents the input select body
|
||||
type ObjectSelectRequest struct {
|
||||
XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"`
|
||||
Expression string
|
||||
ExpressionType QueryExpressionType
|
||||
InputSerialization struct {
|
||||
CompressionType SelectCompressionType
|
||||
Parquet *struct{}
|
||||
CSV *struct {
|
||||
FileHeaderInfo CSVFileHeaderInfo
|
||||
RecordDelimiter string
|
||||
FieldDelimiter string
|
||||
QuoteCharacter string
|
||||
QuoteEscapeCharacter string
|
||||
Comments string
|
||||
}
|
||||
JSON *struct {
|
||||
Type JSONType
|
||||
}
|
||||
}
|
||||
OutputSerialization struct {
|
||||
CSV *struct {
|
||||
QuoteFields CSVQuoteFields
|
||||
RecordDelimiter string
|
||||
FieldDelimiter string
|
||||
QuoteCharacter string
|
||||
QuoteEscapeCharacter string
|
||||
}
|
||||
JSON *struct {
|
||||
RecordDelimiter string
|
||||
}
|
||||
}
|
||||
RequestProgress struct {
|
||||
Enabled bool
|
||||
}
|
||||
}
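To make the shape of that request concrete, the standalone snippet below unmarshals a trimmed-down SelectObjectContentRequest body into a hypothetical subset of the struct above; the field and element names mirror the definition, while the sample query itself is made up.

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed-down mirror of ObjectSelectRequest, for illustration only.
type selectRequest struct {
	XMLName            xml.Name `xml:"SelectObjectContentRequest"`
	Expression         string
	ExpressionType     string
	InputSerialization struct {
		CSV *struct {
			FileHeaderInfo string
		}
	}
	RequestProgress struct {
		Enabled bool
	}
}

func main() {
	body := `<SelectObjectContentRequest>
  <Expression>SELECT s.name FROM S3Object s</Expression>
  <ExpressionType>SQL</ExpressionType>
  <InputSerialization><CSV><FileHeaderInfo>USE</FileHeaderInfo></CSV></InputSerialization>
  <RequestProgress><Enabled>true</Enabled></RequestProgress>
</SelectObjectContentRequest>`

	var req selectRequest
	if err := xml.Unmarshal([]byte(body), &req); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(req.ExpressionType, req.Expression, req.InputSerialization.CSV.FileHeaderInfo)
}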
|
||||
|
||||
// ObjectIdentifier carries key name for the object to delete.
|
||||
type ObjectIdentifier struct {
|
||||
ObjectName string `xml:"Key"`
|
||||
|
||||
@@ -22,12 +22,17 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
||||
minio "github.com/minio/minio-go"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/dns"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/s3select"
|
||||
)
|
||||
|
||||
// APIError structure
|
||||
@@ -68,6 +73,7 @@ const (
|
||||
ErrInvalidCopyPartRange
|
||||
ErrInvalidCopyPartRangeSource
|
||||
ErrInvalidMaxKeys
|
||||
ErrInvalidEncodingMethod
|
||||
ErrInvalidMaxUploads
|
||||
ErrInvalidMaxParts
|
||||
ErrInvalidPartNumberMarker
|
||||
@@ -85,6 +91,7 @@ const (
|
||||
ErrNoSuchBucketPolicy
|
||||
ErrNoSuchKey
|
||||
ErrNoSuchUpload
|
||||
ErrNoSuchVersion
|
||||
ErrNotImplemented
|
||||
ErrPreconditionFailed
|
||||
ErrRequestTimeTooSkewed
|
||||
@@ -107,7 +114,6 @@ const (
|
||||
ErrInvalidRequestVersion
|
||||
ErrMissingSignTag
|
||||
ErrMissingSignHeadersTag
|
||||
ErrPolicyAlreadyExpired
|
||||
ErrMalformedDate
|
||||
ErrMalformedPresignedDate
|
||||
ErrMalformedCredentialDate
|
||||
@@ -150,6 +156,9 @@ const (
|
||||
ErrKMSNotConfigured
|
||||
ErrKMSAuthFailure
|
||||
|
||||
ErrNoAccessKey
|
||||
ErrInvalidToken
|
||||
|
||||
// Bucket notification related errors.
|
||||
ErrEventNotification
|
||||
ErrARNNotification
|
||||
@@ -187,11 +196,15 @@ const (
|
||||
// new error codes here.
|
||||
|
||||
ErrMalformedJSON
|
||||
ErrAdminNoSuchUser
|
||||
ErrAdminNoSuchPolicy
|
||||
ErrAdminInvalidArgument
|
||||
ErrAdminInvalidAccessKey
|
||||
ErrAdminInvalidSecretKey
|
||||
ErrAdminConfigNoQuorum
|
||||
ErrAdminConfigTooLarge
|
||||
ErrAdminConfigBadJSON
|
||||
ErrAdminConfigDuplicateKeys
|
||||
ErrAdminCredentialsMismatch
|
||||
ErrInsecureClientRequest
|
||||
ErrObjectTampered
|
||||
@@ -204,7 +217,7 @@ const (
|
||||
ErrHealOverlappingPaths
|
||||
ErrIncorrectContinuationToken
|
||||
|
||||
//S3 Select Errors
|
||||
// S3 Select Errors
|
||||
ErrEmptyRequestBody
|
||||
ErrUnsupportedFunction
|
||||
ErrInvalidExpressionType
|
||||
@@ -249,7 +262,6 @@ const (
|
||||
ErrParseUnsupportedAlias
|
||||
ErrParseUnsupportedSyntax
|
||||
ErrParseUnknownOperator
|
||||
ErrParseInvalidPathComponent
|
||||
ErrParseMissingIdentAfterAt
|
||||
ErrParseUnexpectedOperator
|
||||
ErrParseUnexpectedTerm
|
||||
@@ -288,14 +300,28 @@ const (
|
||||
ErrEvaluatorInvalidTimestampFormatPatternToken
|
||||
ErrEvaluatorInvalidTimestampFormatPatternSymbol
|
||||
ErrEvaluatorBindingDoesNotExist
|
||||
ErrInvalidColumnIndex
|
||||
ErrMissingHeaders
|
||||
ErrInvalidColumnIndex
|
||||
|
||||
ErrAdminConfigNotificationTargetsFailed
|
||||
ErrAdminProfilerNotEnabled
|
||||
ErrInvalidDecompressedSize
|
||||
ErrAddUserInvalidArgument
|
||||
)
|
||||
|
||||
type errorCodeMap map[APIErrorCode]APIError
|
||||
|
||||
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
|
||||
apiErr, ok := e[errCode]
|
||||
if !ok {
|
||||
return e[ErrInternalError]
|
||||
}
|
||||
return apiErr
|
||||
}
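The point of the map wrapper is the fallback: a code with no registered entry degrades to the InternalError response rather than a zero-value APIError. A standalone sketch of the same pattern, with made-up names rather than the package's own:

package main

import "fmt"

type apiError struct{ Code string }
type errorMap map[int]apiError

const errInternal = 0

// toAPIErr mirrors the fallback behavior of errorCodeMap.ToAPIErr above:
// unknown codes degrade to the internal-error entry.
func (e errorMap) toAPIErr(code int) apiError {
	if apiErr, ok := e[code]; ok {
		return apiErr
	}
	return e[errInternal]
}

func main() {
	codes := errorMap{errInternal: {"InternalError"}, 42: {"NoSuchKey"}}
	fmt.Println(codes.toAPIErr(42))   // {NoSuchKey}
	fmt.Println(codes.toAPIErr(7777)) // {InternalError}
}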
|
||||
|
||||
// error code to APIError structure, these fields carry respective
|
||||
// descriptions for all the error responses.
|
||||
var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
var errorCodes = errorCodeMap{
|
||||
ErrInvalidCopyDest: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
|
||||
@@ -331,6 +357,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "Argument maxKeys must be an integer between 0 and 2147483647",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidEncodingMethod: {
|
||||
Code: "InvalidArgument",
|
||||
Description: "Invalid Encoding Method specified in Request",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidMaxParts: {
|
||||
Code: "InvalidArgument",
|
||||
Description: "Argument max-parts must be an integer between 0 and 2147483647",
|
||||
@@ -436,6 +467,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchVersion: {
|
||||
Code: "NoSuchVersion",
|
||||
Description: "Indicates that the version ID specified in the request does not match an existing version.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNotImplemented: {
|
||||
Code: "NotImplemented",
|
||||
Description: "A header you provided implies functionality that is not implemented",
|
||||
@@ -588,11 +624,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "Signature header missing SignedHeaders field.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrPolicyAlreadyExpired: {
|
||||
Code: "AccessDenied",
|
||||
Description: "Invalid according to Policy: Policy expired.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMalformedExpires: {
|
||||
Code: "AuthorizationQueryParametersError",
|
||||
Description: "X-Amz-Expires should be a number",
|
||||
@@ -802,6 +833,16 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "Server side encryption specified but KMS authorization failed",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrNoAccessKey: {
|
||||
Code: "AccessDenied",
|
||||
Description: "No AWSAccessKey was presented",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrInvalidToken: {
|
||||
Code: "InvalidTokenId",
|
||||
Description: "The security token included in the request is invalid",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
|
||||
/// S3 extensions.
|
||||
ErrContentSHA256Mismatch: {
|
||||
@@ -861,6 +902,21 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "The JSON you provided was not well-formed or did not validate against our published format.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminNoSuchUser: {
|
||||
Code: "XMinioAdminNoSuchUser",
|
||||
Description: "The specified user does not exist.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrAdminNoSuchPolicy: {
|
||||
Code: "XMinioAdminNoSuchPolicy",
|
||||
Description: "The canned policy does not exist.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrAdminInvalidArgument: {
|
||||
Code: "XMinioAdminInvalidArgument",
|
||||
Description: "Invalid arguments specified.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminInvalidAccessKey: {
|
||||
Code: "XMinioAdminInvalidAccessKey",
|
||||
Description: "The access key is invalid.",
|
||||
@@ -879,11 +935,16 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
ErrAdminConfigTooLarge: {
|
||||
Code: "XMinioAdminConfigTooLarge",
|
||||
Description: fmt.Sprintf("Configuration data provided exceeds the allowed maximum of %d bytes",
|
||||
maxConfigJSONSize),
|
||||
maxEConfigJSONSize),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminConfigBadJSON: {
|
||||
Code: "XMinioAdminConfigBadJSON",
|
||||
Description: "JSON configuration provided is of incorrect format",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminConfigDuplicateKeys: {
|
||||
Code: "XMinioAdminConfigDuplicateKeys",
|
||||
Description: "JSON configuration provided has objects with duplicate keys",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
@@ -892,6 +953,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "Configuration update failed due to an unsuccessful attempt to connect to one or more notification servers",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminProfilerNotEnabled: {
|
||||
Code: "XMinioAdminProfilerNotEnabled",
|
||||
Description: "Unable to perform the requested operation because profiling is not enabled",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminCredentialsMismatch: {
|
||||
Code: "XMinioAdminCredentialsMismatch",
|
||||
Description: "Credentials in config mismatch with server environment variables",
|
||||
@@ -1192,11 +1258,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "The SQL expression contains an invalid operator.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrParseInvalidPathComponent: {
|
||||
Code: "ParseInvalidPathComponent",
|
||||
Description: "The SQL expression contains an invalid path component.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrParseMissingIdentAfterAt: {
|
||||
Code: "ParseMissingIdentAfterAt",
|
||||
Description: "Did not find the expected identifier after the @ symbol in the SQL expression.",
|
||||
@@ -1382,11 +1443,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidColumnIndex: {
|
||||
Code: "InvalidColumnIndex",
|
||||
Description: "Column index in the SQL expression is invalid.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrEvaluatorBindingDoesNotExist: {
|
||||
Code: "ErrEvaluatorBindingDoesNotExist",
|
||||
Description: "A column name or a path provided does not exist in the SQL expression",
|
||||
@@ -1397,21 +1453,44 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||
Description: "Some headers in the query are missing from the file. Check the file and try again.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidColumnIndex: {
|
||||
Code: "InvalidColumnIndex",
|
||||
Description: "The column index is invalid. Please check the service documentation and try again.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidDecompressedSize: {
|
||||
Code: "XMinioInvalidDecompressedSize",
|
||||
Description: "The data provided is unfit for decompression",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAddUserInvalidArgument: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "User is not allowed to be the same as the admin access key",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
// Add your error structure here.
|
||||
}
|
||||
|
||||
// toAPIErrorCode - Converts embedded errors. Convenience
|
||||
// function written to handle all cases where we have known types of
|
||||
// errors returned by underlying layers.
|
||||
func toAPIErrorCode(err error) (apiErr APIErrorCode) {
|
||||
func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
if err == nil {
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// Verify if the underlying error is signature mismatch.
|
||||
switch err {
|
||||
case errInvalidArgument:
|
||||
apiErr = ErrAdminInvalidArgument
|
||||
case errNoSuchUser:
|
||||
apiErr = ErrAdminNoSuchUser
|
||||
case errNoSuchPolicy:
|
||||
apiErr = ErrAdminNoSuchPolicy
|
||||
case errSignatureMismatch:
|
||||
apiErr = ErrSignatureDoesNotMatch
|
||||
case errInvalidRange:
|
||||
apiErr = ErrInvalidRange
|
||||
case errDataTooLarge:
|
||||
apiErr = ErrEntityTooLarge
|
||||
case errDataTooSmall:
|
||||
@@ -1421,10 +1500,10 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
|
||||
case auth.ErrInvalidSecretKeyLength:
|
||||
apiErr = ErrAdminInvalidSecretKey
|
||||
// SSE errors
|
||||
case errInvalidEncryptionParameters:
|
||||
apiErr = ErrInvalidEncryptionParameters
|
||||
case crypto.ErrInvalidEncryptionMethod:
|
||||
apiErr = ErrInvalidEncryptionMethod
|
||||
case errInsecureSSERequest:
|
||||
apiErr = ErrInsecureSSECustomerRequest
|
||||
case crypto.ErrInvalidCustomerAlgorithm:
|
||||
apiErr = ErrInvalidSSECustomerAlgorithm
|
||||
case crypto.ErrInvalidCustomerKey:
|
||||
@@ -1452,168 +1531,11 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
|
||||
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
|
||||
apiErr = ErrOperationTimedOut
|
||||
}
|
||||
switch err {
|
||||
case s3select.ErrBusy:
|
||||
apiErr = ErrBusy
|
||||
case s3select.ErrUnauthorizedAccess:
|
||||
apiErr = ErrUnauthorizedAccess
|
||||
case s3select.ErrExpressionTooLong:
|
||||
apiErr = ErrExpressionTooLong
|
||||
case s3select.ErrIllegalSQLFunctionArgument:
|
||||
apiErr = ErrIllegalSQLFunctionArgument
|
||||
case s3select.ErrInvalidKeyPath:
|
||||
apiErr = ErrInvalidKeyPath
|
||||
case s3select.ErrInvalidCompressionFormat:
|
||||
apiErr = ErrInvalidCompressionFormat
|
||||
case s3select.ErrInvalidFileHeaderInfo:
|
||||
apiErr = ErrInvalidFileHeaderInfo
|
||||
case s3select.ErrInvalidJSONType:
|
||||
apiErr = ErrInvalidJSONType
|
||||
case s3select.ErrInvalidQuoteFields:
|
||||
apiErr = ErrInvalidQuoteFields
|
||||
case s3select.ErrInvalidRequestParameter:
|
||||
apiErr = ErrInvalidRequestParameter
|
||||
case s3select.ErrInvalidDataType:
|
||||
apiErr = ErrInvalidDataType
|
||||
case s3select.ErrInvalidTextEncoding:
|
||||
apiErr = ErrInvalidTextEncoding
|
||||
case s3select.ErrInvalidTableAlias:
|
||||
apiErr = ErrInvalidTableAlias
|
||||
case s3select.ErrMissingRequiredParameter:
|
||||
apiErr = ErrMissingRequiredParameter
|
||||
case s3select.ErrObjectSerializationConflict:
|
||||
apiErr = ErrObjectSerializationConflict
|
||||
case s3select.ErrUnsupportedSQLOperation:
|
||||
apiErr = ErrUnsupportedSQLOperation
|
||||
case s3select.ErrUnsupportedSQLStructure:
|
||||
apiErr = ErrUnsupportedSQLStructure
|
||||
case s3select.ErrUnsupportedSyntax:
|
||||
apiErr = ErrUnsupportedSyntax
|
||||
case s3select.ErrUnsupportedRangeHeader:
|
||||
apiErr = ErrUnsupportedRangeHeader
|
||||
case s3select.ErrLexerInvalidChar:
|
||||
apiErr = ErrLexerInvalidChar
|
||||
case s3select.ErrLexerInvalidOperator:
|
||||
apiErr = ErrLexerInvalidOperator
|
||||
case s3select.ErrLexerInvalidLiteral:
|
||||
apiErr = ErrLexerInvalidLiteral
|
||||
case s3select.ErrLexerInvalidIONLiteral:
|
||||
apiErr = ErrLexerInvalidIONLiteral
|
||||
case s3select.ErrParseExpectedDatePart:
|
||||
apiErr = ErrParseExpectedDatePart
|
||||
case s3select.ErrParseExpectedKeyword:
|
||||
apiErr = ErrParseExpectedKeyword
|
||||
case s3select.ErrParseExpectedTokenType:
|
||||
apiErr = ErrParseExpectedTokenType
|
||||
case s3select.ErrParseExpected2TokenTypes:
|
||||
apiErr = ErrParseExpected2TokenTypes
|
||||
case s3select.ErrParseExpectedNumber:
|
||||
apiErr = ErrParseExpectedNumber
|
||||
case s3select.ErrParseExpectedRightParenBuiltinFunctionCall:
|
||||
apiErr = ErrParseExpectedRightParenBuiltinFunctionCall
|
||||
case s3select.ErrParseExpectedTypeName:
|
||||
apiErr = ErrParseExpectedTypeName
|
||||
case s3select.ErrParseExpectedWhenClause:
|
||||
apiErr = ErrParseExpectedWhenClause
|
||||
case s3select.ErrParseUnsupportedToken:
|
||||
apiErr = ErrParseUnsupportedToken
|
||||
case s3select.ErrParseUnsupportedLiteralsGroupBy:
|
||||
apiErr = ErrParseUnsupportedLiteralsGroupBy
|
||||
case s3select.ErrParseExpectedMember:
|
||||
apiErr = ErrParseExpectedMember
|
||||
case s3select.ErrParseUnsupportedSelect:
|
||||
apiErr = ErrParseUnsupportedSelect
|
||||
case s3select.ErrParseUnsupportedCase:
|
||||
apiErr = ErrParseUnsupportedCase
|
||||
case s3select.ErrParseUnsupportedCaseClause:
|
||||
apiErr = ErrParseUnsupportedCaseClause
|
||||
case s3select.ErrParseUnsupportedAlias:
|
||||
apiErr = ErrParseUnsupportedAlias
|
||||
case s3select.ErrParseUnsupportedSyntax:
|
||||
apiErr = ErrParseUnsupportedSyntax
|
||||
case s3select.ErrParseUnknownOperator:
|
||||
apiErr = ErrParseUnknownOperator
|
||||
case s3select.ErrParseInvalidPathComponent:
|
||||
apiErr = ErrParseInvalidPathComponent
|
||||
case s3select.ErrParseMissingIdentAfterAt:
|
||||
apiErr = ErrParseMissingIdentAfterAt
|
||||
case s3select.ErrParseUnexpectedOperator:
|
||||
apiErr = ErrParseUnexpectedOperator
|
||||
case s3select.ErrParseUnexpectedTerm:
|
||||
apiErr = ErrParseUnexpectedTerm
|
||||
case s3select.ErrParseUnexpectedToken:
|
||||
apiErr = ErrParseUnexpectedToken
|
||||
case s3select.ErrParseUnexpectedKeyword:
|
||||
apiErr = ErrParseUnexpectedKeyword
|
||||
case s3select.ErrParseExpectedExpression:
|
||||
apiErr = ErrParseExpectedExpression
|
||||
case s3select.ErrParseExpectedLeftParenAfterCast:
|
||||
apiErr = ErrParseExpectedLeftParenAfterCast
|
||||
case s3select.ErrParseExpectedLeftParenValueConstructor:
|
||||
apiErr = ErrParseExpectedLeftParenValueConstructor
|
||||
case s3select.ErrParseExpectedLeftParenBuiltinFunctionCall:
|
||||
apiErr = ErrParseExpectedLeftParenBuiltinFunctionCall
|
||||
case s3select.ErrParseExpectedArgumentDelimiter:
|
||||
apiErr = ErrParseExpectedArgumentDelimiter
|
||||
case s3select.ErrParseCastArity:
|
||||
apiErr = ErrParseCastArity
|
||||
case s3select.ErrParseInvalidTypeParam:
|
||||
apiErr = ErrParseInvalidTypeParam
|
||||
case s3select.ErrParseEmptySelect:
|
||||
apiErr = ErrParseEmptySelect
|
||||
case s3select.ErrParseSelectMissingFrom:
|
||||
apiErr = ErrParseSelectMissingFrom
|
||||
case s3select.ErrParseExpectedIdentForGroupName:
|
||||
apiErr = ErrParseExpectedIdentForGroupName
|
||||
case s3select.ErrParseExpectedIdentForAlias:
|
||||
apiErr = ErrParseExpectedIdentForAlias
|
||||
case s3select.ErrParseUnsupportedCallWithStar:
|
||||
apiErr = ErrParseUnsupportedCallWithStar
|
||||
case s3select.ErrParseNonUnaryAgregateFunctionCall:
|
||||
apiErr = ErrParseNonUnaryAgregateFunctionCall
|
||||
case s3select.ErrParseMalformedJoin:
|
||||
apiErr = ErrParseMalformedJoin
|
||||
case s3select.ErrParseExpectedIdentForAt:
|
||||
apiErr = ErrParseExpectedIdentForAt
|
||||
case s3select.ErrParseAsteriskIsNotAloneInSelectList:
|
||||
apiErr = ErrParseAsteriskIsNotAloneInSelectList
|
||||
case s3select.ErrParseCannotMixSqbAndWildcardInSelectList:
|
||||
apiErr = ErrParseCannotMixSqbAndWildcardInSelectList
|
||||
case s3select.ErrParseInvalidContextForWildcardInSelectList:
|
||||
apiErr = ErrParseInvalidContextForWildcardInSelectList
|
||||
case s3select.ErrIncorrectSQLFunctionArgumentType:
|
||||
apiErr = ErrIncorrectSQLFunctionArgumentType
|
||||
case s3select.ErrValueParseFailure:
|
||||
apiErr = ErrValueParseFailure
|
||||
case s3select.ErrIntegerOverflow:
|
||||
apiErr = ErrIntegerOverflow
|
||||
case s3select.ErrLikeInvalidInputs:
|
||||
apiErr = ErrLikeInvalidInputs
|
||||
case s3select.ErrCastFailed:
|
||||
apiErr = ErrCastFailed
|
||||
case s3select.ErrInvalidCast:
|
||||
apiErr = ErrInvalidCast
|
||||
case s3select.ErrEvaluatorInvalidTimestampFormatPattern:
|
||||
apiErr = ErrEvaluatorInvalidTimestampFormatPattern
|
||||
case s3select.ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing:
|
||||
apiErr = ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing
|
||||
case s3select.ErrEvaluatorTimestampFormatPatternDuplicateFields:
|
||||
apiErr = ErrEvaluatorTimestampFormatPatternDuplicateFields
|
||||
case s3select.ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch:
|
||||
apiErr = ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch
|
||||
case s3select.ErrEvaluatorUnterminatedTimestampFormatPatternToken:
|
||||
apiErr = ErrEvaluatorUnterminatedTimestampFormatPatternToken
|
||||
case s3select.ErrEvaluatorInvalidTimestampFormatPatternToken:
|
||||
apiErr = ErrEvaluatorInvalidTimestampFormatPatternToken
|
||||
case s3select.ErrEvaluatorInvalidTimestampFormatPatternSymbol:
|
||||
apiErr = ErrEvaluatorInvalidTimestampFormatPatternSymbol
|
||||
case s3select.ErrInvalidColumnIndex:
|
||||
apiErr = ErrInvalidColumnIndex
|
||||
case s3select.ErrEvaluatorBindingDoesNotExist:
|
||||
apiErr = ErrEvaluatorBindingDoesNotExist
|
||||
case s3select.ErrMissingHeaders:
|
||||
apiErr = ErrMissingHeaders
|
||||
|
||||
// Compression errors
|
||||
switch err {
|
||||
case errInvalidDecompressedSize:
|
||||
apiErr = ErrInvalidDecompressedSize
|
||||
}
|
||||
|
||||
if apiErr != ErrNone {
|
||||
@@ -1720,6 +1642,64 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrObjectTampered
|
||||
default:
|
||||
apiErr = ErrInternalError
|
||||
// Make sure to log the errors which we cannot translate
|
||||
// to meaningful S3 API errors. This is added to aid in
|
||||
// debugging unexpected/unhandled errors.
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
|
||||
return apiErr
|
||||
}
|
||||
|
||||
var noError = APIError{}
|
||||
|
||||
// toAPIError - Converts embedded errors. Convenience
|
||||
// function written to handle all cases where we have known types of
|
||||
// errors returned by underlying layers.
|
||||
func toAPIError(ctx context.Context, err error) APIError {
|
||||
if err == nil {
|
||||
return noError
|
||||
}
|
||||
|
||||
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
|
||||
if apiErr.Code == "InternalError" {
|
||||
// If we see an internal error try to interpret
|
||||
// any underlying errors if possible depending on
|
||||
// their internal error types. This code is only
|
||||
// useful with gateway implementations.
|
||||
switch e := err.(type) {
|
||||
case minio.ErrorResponse:
|
||||
apiErr = APIError{
|
||||
Code: e.Code,
|
||||
Description: e.Message,
|
||||
HTTPStatusCode: e.StatusCode,
|
||||
}
|
||||
case *googleapi.Error:
|
||||
apiErr = APIError{
|
||||
Code: "XGCSInternalError",
|
||||
Description: e.Message,
|
||||
HTTPStatusCode: e.Code,
|
||||
}
|
||||
// GCS may send multiple errors, just pick the first one
|
||||
// since S3 only sends one Error XML response.
|
||||
if len(e.Errors) >= 1 {
|
||||
apiErr.Code = e.Errors[0].Reason
|
||||
|
||||
}
|
||||
case storage.AzureStorageServiceError:
|
||||
apiErr = APIError{
|
||||
Code: e.Code,
|
||||
Description: e.Message,
|
||||
HTTPStatusCode: e.StatusCode,
|
||||
}
|
||||
case oss.ServiceError:
|
||||
apiErr = APIError{
|
||||
Code: e.Code,
|
||||
Description: e.Message,
|
||||
HTTPStatusCode: e.StatusCode,
|
||||
}
|
||||
// Add more Gateway SDKs here if any in future.
|
||||
}
|
||||
}
|
||||
|
||||
return apiErr
|
||||
@@ -1727,20 +1707,23 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
|
||||
|
||||
// getAPIError provides API Error for input API error code.
|
||||
func getAPIError(code APIErrorCode) APIError {
|
||||
if apiErr, ok := errorCodeResponse[code]; ok {
|
||||
if apiErr, ok := errorCodes[code]; ok {
|
||||
return apiErr
|
||||
}
|
||||
return errorCodeResponse[ErrInternalError]
|
||||
return errorCodes.ToAPIErr(ErrInternalError)
|
||||
}
|
||||
|
||||
// getAPIErrorResponse takes a standard error and resource value and
|
||||
// provides an encodable populated response value.
|
||||
func getAPIErrorResponse(err APIError, resource, requestid string) APIErrorResponse {
|
||||
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
return APIErrorResponse{
|
||||
Code: err.Code,
|
||||
Message: err.Description,
|
||||
Resource: resource,
|
||||
RequestID: requestid,
|
||||
HostID: "3L137",
|
||||
Code: err.Code,
|
||||
Message: err.Description,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
Resource: resource,
|
||||
RequestID: requestID,
|
||||
HostID: hostID,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
@@ -24,7 +25,7 @@ import (
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
|
||||
var toAPIErrorCodeTests = []struct {
|
||||
var toAPIErrorTests = []struct {
|
||||
err error
|
||||
errCode APIErrorCode
|
||||
}{
|
||||
@@ -52,7 +53,6 @@ var toAPIErrorCodeTests = []struct {
|
||||
{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},
|
||||
|
||||
// SSE-C errors
|
||||
{err: errInsecureSSERequest, errCode: ErrInsecureSSECustomerRequest},
|
||||
{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
|
||||
{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
|
||||
{err: crypto.ErrInvalidCustomerKey, errCode: ErrInvalidSSECustomerKey},
|
||||
@@ -65,8 +65,9 @@ var toAPIErrorCodeTests = []struct {
|
||||
}
|
||||
|
||||
func TestAPIErrCode(t *testing.T) {
|
||||
for i, testCase := range toAPIErrorCodeTests {
|
||||
errCode := toAPIErrorCode(testCase.err)
|
||||
ctx := context.Background()
|
||||
for i, testCase := range toAPIErrorTests {
|
||||
errCode := toAPIErrorCode(ctx, testCase.err)
|
||||
if errCode != testCase.errCode {
|
||||
t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ func mustGetRequestID(t time.Time) string {
|
||||
|
||||
// Write http common headers
|
||||
func setCommonHeaders(w http.ResponseWriter) {
|
||||
w.Header().Set("Server", globalServerUserAgent)
|
||||
w.Header().Set("Server", "Minio/"+ReleaseTag)
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalServerConfig.GetRegion(); region != "" {
|
||||
@@ -87,6 +87,9 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
w.Header().Set("Content-Encoding", objInfo.ContentEncoding)
|
||||
}
|
||||
|
||||
if !objInfo.Expires.IsZero() {
|
||||
w.Header().Set("Expires", objInfo.Expires.UTC().Format(http.TimeFormat))
|
||||
}
|
||||
// Set all other user defined metadata.
|
||||
for k, v := range objInfo.UserDefined {
|
||||
if hasPrefix(k, ReservedMetadataPrefix) {
|
||||
@@ -104,7 +107,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case objInfo.IsCompressed():
|
||||
totalObjectSize = objInfo.GetActualSize()
|
||||
if totalObjectSize < 0 {
|
||||
return errInvalidDecompressedSize
|
||||
}
|
||||
default:
|
||||
totalObjectSize = objInfo.Size
|
||||
}
|
||||
|
||||
@@ -22,15 +22,22 @@ import (
|
||||
)
|
||||
|
||||
// Parse bucket url queries
|
||||
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
|
||||
prefix = values.Get("prefix")
|
||||
marker = values.Get("marker")
|
||||
delimiter = values.Get("delimiter")
|
||||
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
|
||||
errCode = ErrNone
|
||||
|
||||
if values.Get("max-keys") != "" {
|
||||
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
||||
var err error
|
||||
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
|
||||
errCode = ErrInvalidMaxKeys
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxkeys = maxObjectList
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
marker = values.Get("marker")
|
||||
delimiter = values.Get("delimiter")
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
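Previously a malformed max-keys fell through strconv.Atoi with the error ignored, so the listing proceeded with maxkeys of 0; the rewrite surfaces ErrInvalidMaxKeys instead. Below is a standalone sketch of the stricter pattern; parseMaxKeys is a made-up name, not part of the change.

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// parseMaxKeys mimics the stricter behavior introduced above: an absent
// value falls back to a default, a malformed value is reported as an error.
func parseMaxKeys(values url.Values, defaultMax int) (int, error) {
	raw := values.Get("max-keys")
	if raw == "" {
		return defaultMax, nil
	}
	n, err := strconv.Atoi(raw)
	if err != nil {
		return 0, fmt.Errorf("invalid max-keys %q: %w", raw, err)
	}
	return n, nil
}

func main() {
	for _, q := range []string{"max-keys=100", "max-keys=abc", ""} {
		v, _ := url.ParseQuery(q)
		n, err := parseMaxKeys(v, 1000)
		fmt.Println(q, "->", n, err)
	}
}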
|
||||
@@ -47,44 +54,69 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
|
||||
}
|
||||
}
|
||||
|
||||
if values.Get("max-keys") != "" {
|
||||
var err error
|
||||
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
|
||||
errCode = ErrInvalidMaxKeys
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxkeys = maxObjectList
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
token = values.Get("continuation-token")
|
||||
startAfter = values.Get("start-after")
|
||||
delimiter = values.Get("delimiter")
|
||||
if values.Get("max-keys") != "" {
|
||||
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
||||
} else {
|
||||
maxkeys = maxObjectList
|
||||
}
|
||||
fetchOwner = values.Get("fetch-owner") == "true"
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse bucket url queries for ?uploads
|
||||
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
|
||||
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
|
||||
errCode = ErrNone
|
||||
|
||||
if values.Get("max-uploads") != "" {
|
||||
var err error
|
||||
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
|
||||
errCode = ErrInvalidMaxUploads
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxUploads = maxUploadsList
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
keyMarker = values.Get("key-marker")
|
||||
uploadIDMarker = values.Get("upload-id-marker")
|
||||
delimiter = values.Get("delimiter")
|
||||
if values.Get("max-uploads") != "" {
|
||||
maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
|
||||
} else {
|
||||
maxUploads = maxUploadsList
|
||||
}
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse object url queries
|
||||
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
|
||||
uploadID = values.Get("uploadId")
|
||||
partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
|
||||
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
|
||||
var err error
|
||||
errCode = ErrNone
|
||||
|
||||
if values.Get("max-parts") != "" {
|
||||
maxParts, _ = strconv.Atoi(values.Get("max-parts"))
|
||||
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
|
||||
errCode = ErrInvalidMaxParts
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxParts = maxPartsList
|
||||
}
|
||||
|
||||
if values.Get("part-number-marker") != "" {
|
||||
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
|
||||
errCode = ErrInvalidPartNumberMarker
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
uploadID = values.Get("uploadId")
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -156,7 +156,10 @@ func TestListObjectsV1Resources(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
prefix, marker, delimiter, maxKeys, encodingType := getListObjectsV1Args(testCase.values)
|
||||
prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
|
||||
if argsErr != ErrNone {
|
||||
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
|
||||
}
|
||||
if prefix != testCase.prefix {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
|
||||
}
|
||||
@@ -198,7 +201,10 @@ func TestGetObjectsResources(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
uploadID, partNumberMarker, maxParts, encodingType := getObjectResources(testCase.values)
|
||||
uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
|
||||
if argsErr != ErrNone {
|
||||
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
|
||||
}
|
||||
if uploadID != testCase.uploadID {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
|
||||
}
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import "net/http"
|
||||
|
||||
// Represents additional fields necessary for ErrPartTooSmall S3 error.
|
||||
type completeMultipartAPIError struct {
|
||||
// Proposed size represents uploaded size of the part.
|
||||
ProposedSize int64
|
||||
// Minimum size allowed represents the minimum size allowed per
|
||||
// part. Defaults to 5MB.
|
||||
MinSizeAllowed int64
|
||||
// Part number of the part which is incorrect.
|
||||
PartNumber int
|
||||
// ETag of the part which is incorrect.
|
||||
PartETag string
|
||||
// Other default XML error responses.
|
||||
APIErrorResponse
|
||||
}
|
||||
|
||||
// writeErrorResponsePartTooSmall - function is used specifically to
|
||||
// construct a proper error response during CompleteMultipartUpload
|
||||
// when one of the parts is < 5MB.
|
||||
// The requirement comes due to the fact that generic ErrorResponse
|
||||
// XML doesn't carry the additional fields required to send this
|
||||
// error. So we construct a new type which lies well within the scope
|
||||
// of this function.
|
||||
func writePartSmallErrorResponse(w http.ResponseWriter, r *http.Request, err PartTooSmall) {
|
||||
|
||||
apiError := getAPIError(toAPIErrorCode(err))
|
||||
// Generate complete multipart error response.
|
||||
errorResponse := getAPIErrorResponse(apiError, r.URL.Path, w.Header().Get(responseRequestIDKey))
|
||||
cmpErrResp := completeMultipartAPIError{err.PartSize, int64(5242880), err.PartNumber, err.PartETag, errorResponse}
|
||||
encodedErrorResponse := encodeResponse(cmpErrResp)
|
||||
|
||||
// respond with 400 bad request.
|
||||
w.WriteHeader(apiError.HTTPStatusCode)
|
||||
// Write error body.
|
||||
w.Write(encodedErrorResponse)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/handlers"
|
||||
)
|
||||
|
||||
@@ -278,7 +280,7 @@ func getURLScheme(tls bool) string {
|
||||
}
|
||||
|
||||
// getObjectLocation gets the fully qualified URL of an object.
|
||||
func getObjectLocation(r *http.Request, domain, bucket, object string) string {
|
||||
func getObjectLocation(r *http.Request, domains []string, bucket, object string) string {
|
||||
// unit tests do not have host set.
|
||||
if r.Host == "" {
|
||||
return path.Clean(r.URL.Path)
|
||||
@@ -293,15 +295,31 @@ func getObjectLocation(r *http.Request, domain, bucket, object string) string {
|
||||
Scheme: proto,
|
||||
}
|
||||
// If domain is set then we need to use bucket DNS style.
|
||||
if domain != "" {
|
||||
for _, domain := range domains {
|
||||
if strings.Contains(r.Host, domain) {
|
||||
u.Host = bucket + "." + r.Host
|
||||
u.Path = path.Join(slashSeparator, object)
|
||||
break
|
||||
}
|
||||
}
|
||||
return u.String()
|
||||
}
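The loop above switches the location to virtual-hosted style whenever the request host matches one of the configured domains; the test case near the end of this section expects bucket mybucket and object test/1.txt on host mys3.bucket.org to resolve to http://mybucket.mys3.bucket.org/test/1.txt. A standalone sketch of that composition, where bucketDNSLocation is a made-up helper:

package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

// bucketDNSLocation builds a virtual-hosted-style object URL when the
// request host matches one of the configured domains, mirroring the
// domain loop added above; otherwise it keeps the path-style location.
func bucketDNSLocation(scheme, host, bucket, object string, domains []string) string {
	u := url.URL{Scheme: scheme, Host: host, Path: path.Join("/", bucket, object)}
	for _, domain := range domains {
		if strings.Contains(host, domain) {
			u.Host = bucket + "." + host
			u.Path = path.Join("/", object)
			break
		}
	}
	return u.String()
}

func main() {
	fmt.Println(bucketDNSLocation("http", "mys3.bucket.org", "mybucket", "test/1.txt",
		[]string{"mys3.bucket.org"})) // http://mybucket.mys3.bucket.org/test/1.txt
}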
|
||||
|
||||
// s3EncodeName encodes string in response when encodingType
|
||||
// is specified in AWS S3 requests.
|
||||
func s3EncodeName(name string, encodingType string) (result string) {
|
||||
// Quick path to exit
|
||||
if encodingType == "" {
|
||||
return name
|
||||
}
|
||||
encodingType = strings.ToLower(encodingType)
|
||||
switch encodingType {
|
||||
case "url":
|
||||
return url.QueryEscape(name)
|
||||
}
|
||||
return name
|
||||
}
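A quick standalone illustration of what encoding-type=url means for listing entries with special characters; the key below is invented, and url.QueryEscape is exactly what the function above applies.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	key := "photos/2018/summer trip/#1.jpg"
	// With encoding-type=url the key is returned query-escaped,
	// otherwise it is passed through unchanged.
	fmt.Println(url.QueryEscape(key)) // photos%2F2018%2Fsummer+trip%2F%231.jpg
	fmt.Println(key)                  // photos/2018/summer trip/#1.jpg
}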
|
||||
|
||||
// generates ListBucketsResponse from array of BucketInfo which can be
|
||||
// serialized to match XML and JSON API spec output.
|
||||
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
@@ -324,7 +342,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
}
|
||||
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
var contents []Object
|
||||
var prefixes []CommonPrefix
|
||||
var owner = Owner{}
|
||||
@@ -336,7 +354,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
|
||||
if object.Name == "" {
|
||||
continue
|
||||
}
|
||||
content.Key = object.Name
|
||||
content.Key = s3EncodeName(object.Name, encodingType)
|
||||
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
|
||||
if object.ETag != "" {
|
||||
content.ETag = "\"" + object.ETag + "\""
|
||||
@@ -346,20 +364,20 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
|
||||
content.Owner = owner
|
||||
contents = append(contents, content)
|
||||
}
|
||||
// TODO - support EncodingType in xml decoding
|
||||
data.Name = bucket
|
||||
data.Contents = contents
|
||||
|
||||
data.Prefix = prefix
|
||||
data.Marker = marker
|
||||
data.Delimiter = delimiter
|
||||
data.EncodingType = encodingType
|
||||
data.Prefix = s3EncodeName(prefix, encodingType)
|
||||
data.Marker = s3EncodeName(marker, encodingType)
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
|
||||
data.NextMarker = resp.NextMarker
|
||||
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
|
||||
data.IsTruncated = resp.IsTruncated
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = prefix
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
prefixes = append(prefixes, prefixItem)
|
||||
}
|
||||
data.CommonPrefixes = prefixes
|
||||
@@ -367,7 +385,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
|
||||
}
|
||||
|
||||
// generates a ListObjectsV2 response for the given bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
|
||||
var contents []Object
|
||||
var commonPrefixes []CommonPrefix
|
||||
var owner = Owner{}
|
||||
@@ -382,7 +400,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
if object.Name == "" {
|
||||
continue
|
||||
}
|
||||
content.Key = object.Name
|
||||
content.Key = s3EncodeName(object.Name, encodingType)
|
||||
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
|
||||
if object.ETag != "" {
|
||||
content.ETag = "\"" + object.ETag + "\""
|
||||
@@ -392,20 +410,20 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
content.Owner = owner
|
||||
contents = append(contents, content)
|
||||
}
|
||||
// TODO - support EncodingType in xml decoding
|
||||
data.Name = bucket
|
||||
data.Contents = contents
|
||||
|
||||
data.StartAfter = startAfter
|
||||
data.Delimiter = delimiter
|
||||
data.Prefix = prefix
|
||||
data.EncodingType = encodingType
|
||||
data.StartAfter = s3EncodeName(startAfter, encodingType)
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.Prefix = s3EncodeName(prefix, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
data.ContinuationToken = token
|
||||
data.NextContinuationToken = nextToken
|
||||
data.IsTruncated = isTruncated
|
||||
for _, prefix := range prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem.Prefix = prefix
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
commonPrefixes = append(commonPrefixes, prefixItem)
|
||||
}
|
||||
data.CommonPrefixes = commonPrefixes
|
||||
@@ -449,11 +467,10 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
|
||||
}
|
||||
|
||||
// generates ListPartsResponse from ListPartsInfo.
|
||||
func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse {
|
||||
// TODO - support EncodingType in xml decoding
|
||||
func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) ListPartsResponse {
|
||||
listPartsResponse := ListPartsResponse{}
|
||||
listPartsResponse.Bucket = partsInfo.Bucket
|
||||
listPartsResponse.Key = partsInfo.Object
|
||||
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
|
||||
listPartsResponse.UploadID = partsInfo.UploadID
|
||||
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
|
||||
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
|
||||
@@ -477,29 +494,29 @@ func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse {
|
||||
}
|
||||
|
||||
// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo.
|
||||
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo) ListMultipartUploadsResponse {
|
||||
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse {
|
||||
listMultipartUploadsResponse := ListMultipartUploadsResponse{}
|
||||
listMultipartUploadsResponse.Bucket = bucket
|
||||
listMultipartUploadsResponse.Delimiter = multipartsInfo.Delimiter
|
||||
listMultipartUploadsResponse.Delimiter = s3EncodeName(multipartsInfo.Delimiter, encodingType)
|
||||
listMultipartUploadsResponse.IsTruncated = multipartsInfo.IsTruncated
|
||||
listMultipartUploadsResponse.EncodingType = multipartsInfo.EncodingType
|
||||
listMultipartUploadsResponse.Prefix = multipartsInfo.Prefix
|
||||
listMultipartUploadsResponse.KeyMarker = multipartsInfo.KeyMarker
|
||||
listMultipartUploadsResponse.NextKeyMarker = multipartsInfo.NextKeyMarker
|
||||
listMultipartUploadsResponse.EncodingType = encodingType
|
||||
listMultipartUploadsResponse.Prefix = s3EncodeName(multipartsInfo.Prefix, encodingType)
|
||||
listMultipartUploadsResponse.KeyMarker = s3EncodeName(multipartsInfo.KeyMarker, encodingType)
|
||||
listMultipartUploadsResponse.NextKeyMarker = s3EncodeName(multipartsInfo.NextKeyMarker, encodingType)
|
||||
listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads
|
||||
listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker
|
||||
listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker
|
||||
listMultipartUploadsResponse.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes))
|
||||
for index, commonPrefix := range multipartsInfo.CommonPrefixes {
|
||||
listMultipartUploadsResponse.CommonPrefixes[index] = CommonPrefix{
|
||||
Prefix: commonPrefix,
|
||||
Prefix: s3EncodeName(commonPrefix, encodingType),
|
||||
}
|
||||
}
|
||||
listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads))
|
||||
for index, upload := range multipartsInfo.Uploads {
|
||||
newUpload := Upload{}
|
||||
newUpload.UploadID = upload.UploadID
|
||||
newUpload.Key = upload.Object
|
||||
newUpload.Key = s3EncodeName(upload.Object, encodingType)
|
||||
newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
|
||||
listMultipartUploadsResponse.Uploads[index] = newUpload
|
||||
}
|
||||
@@ -568,49 +585,93 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
// writeErrorResponse writes error headers
|
||||
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
|
||||
switch errorCode {
|
||||
case ErrSlowDown, ErrServerNotInitialized, ErrReadQuorum, ErrWriteQuorum:
|
||||
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, browser bool) {
|
||||
switch err.Code {
|
||||
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
|
||||
// Set Retry-After header to indicate user agents should retry the request after 120 seconds.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set("Retry-After", "120")
|
||||
case "AccessDenied":
|
||||
// The request is from browser and also if browser
|
||||
// is enabled we need to redirect.
|
||||
if browser && globalIsBrowserEnabled {
|
||||
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
}
|
||||
apiError := getAPIError(errorCode)
|
||||
|
||||
// Generate error response.
|
||||
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
|
||||
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
|
||||
w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
|
||||
}
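Taken together with toAPIError above, the handler-side flow becomes: map the internal error to an APIError, then pass it to the writer along with the request context. A hedged in-package sketch using only identifiers that appear in this change set; ctx, w, r and err are assumed to come from the surrounding handler.

// Illustrative sketch, not part of the change set.
if err != nil {
	apiErr := toAPIError(ctx, err)                 // maps internal/gateway errors to an APIError
	writeErrorResponse(ctx, w, apiErr, r.URL, false /* browser */)
	return
}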
|
||||
|
||||
func writeErrorResponseHeadersOnly(w http.ResponseWriter, errorCode APIErrorCode) {
|
||||
apiError := getAPIError(errorCode)
|
||||
writeResponse(w, apiError.HTTPStatusCode, nil, mimeNone)
|
||||
func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
|
||||
writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
|
||||
}
|
||||
|
||||
// writeErrorResponseJSON - writes error response in JSON format;
|
||||
// useful for admin APIs.
|
||||
func writeErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
|
||||
apiError := getAPIError(errorCode)
|
||||
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
|
||||
// Generate error response.
|
||||
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
|
||||
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
}
|
||||
|
||||
// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
|
||||
// but accepts the error message directly (this allows messages to be
|
||||
// dynamically generated.)
|
||||
func writeCustomErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode,
|
||||
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
|
||||
errBody string, reqURL *url.URL) {
|
||||
|
||||
apiError := getAPIError(errorCode)
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
errorResponse := APIErrorResponse{
|
||||
Code: apiError.Code,
|
||||
Message: errBody,
|
||||
Resource: reqURL.Path,
|
||||
RequestID: "3L137",
|
||||
HostID: "3L137",
|
||||
Code: err.Code,
|
||||
Message: errBody,
|
||||
Resource: reqURL.Path,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
RequestID: w.Header().Get(responseRequestIDKey),
|
||||
HostID: w.Header().Get(responseDeploymentIDKey),
|
||||
}
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
}
|
||||
|
||||
// writeCustomErrorResponseXML - similar to writeErrorResponse,
|
||||
// but accepts the error message directly (this allows messages to be
|
||||
// dynamically generated.)
|
||||
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
|
||||
|
||||
switch err.Code {
|
||||
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
|
||||
// Set the Retry-After header to indicate to user-agents that they should retry the request after 120 seconds.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set("Retry-After", "120")
|
||||
case "AccessDenied":
|
||||
// If the request comes from a browser and the browser
// is enabled, we need to redirect to the browser UI.
|
||||
if browser && globalIsBrowserEnabled {
|
||||
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
errorResponse := APIErrorResponse{
|
||||
Code: err.Code,
|
||||
Message: errBody,
|
||||
Resource: reqURL.Path,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
RequestID: w.Header().Get(responseRequestIDKey),
|
||||
HostID: w.Header().Get(responseDeploymentIDKey),
|
||||
}
|
||||
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func TestObjectLocation(t *testing.T) {
|
||||
testCases := []struct {
|
||||
request *http.Request
|
||||
bucket, object string
|
||||
domain string
|
||||
domains []string
|
||||
expectedLocation string
|
||||
}{
|
||||
// Server binding to localhost IP with https.
|
||||
@@ -80,7 +80,7 @@ func TestObjectLocation(t *testing.T) {
|
||||
Host: "mys3.bucket.org",
|
||||
Header: map[string][]string{},
|
||||
},
|
||||
domain: "mys3.bucket.org",
|
||||
domains: []string{"mys3.bucket.org"},
|
||||
bucket: "mybucket",
|
||||
object: "test/1.txt",
|
||||
expectedLocation: "http://mybucket.mys3.bucket.org/test/1.txt",
|
||||
@@ -92,14 +92,14 @@ func TestObjectLocation(t *testing.T) {
|
||||
"X-Forwarded-Scheme": {httpsScheme},
|
||||
},
|
||||
},
|
||||
domain: "mys3.bucket.org",
|
||||
domains: []string{"mys3.bucket.org"},
|
||||
bucket: "mybucket",
|
||||
object: "test/1.txt",
|
||||
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
|
||||
},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
gotLocation := getObjectLocation(testCase.request, testCase.domain, testCase.bucket, testCase.object)
|
||||
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
|
||||
if testCase.expectedLocation != gotLocation {
|
||||
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
|
||||
}
|
||||
|
||||
@@ -26,21 +26,26 @@ import (
|
||||
type objectAPIHandlers struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
CacheAPI func() CacheObjectLayer
|
||||
// Returns true if handlers should interpret encryption.
|
||||
EncryptionEnabled func() bool
|
||||
}
|
||||
|
||||
// registerAPIRouter - registers S3 compatible APIs.
|
||||
func registerAPIRouter(router *mux.Router) {
|
||||
func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
|
||||
// Initialize API.
|
||||
api := objectAPIHandlers{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
CacheAPI: newCacheObjectsFn,
|
||||
EncryptionEnabled: func() bool {
|
||||
return encryptionEnabled
|
||||
},
|
||||
}
|
||||
|
||||
// API Router
|
||||
apiRouter := router.PathPrefix("/").Subrouter()
|
||||
var routers []*mux.Router
|
||||
if globalDomainName != "" {
|
||||
routers = append(routers, apiRouter.Host("{bucket:.+}."+globalDomainName).Subrouter())
|
||||
for _, domainName := range globalDomainNames {
|
||||
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
|
||||
}
|
||||
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
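The switch from a single globalDomainName to a list of globalDomainNames means one virtual-host subrouter per configured domain. A minimal gorilla/mux sketch of that idea, with example domains and a toy handler (none of this is MinIO code):

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	api := router.PathPrefix("/").Subrouter()

	var routers []*mux.Router
	for _, domain := range []string{"s3.example.com", "s3.example.org"} {
		// Virtual-host style: bucket.s3.example.com/object
		routers = append(routers, api.Host("{bucket:.+}."+domain).Subrouter())
	}
	// Path style: s3.example.com/bucket/object
	routers = append(routers, api.PathPrefix("/{bucket}").Subrouter())

	for _, r := range routers {
		r.Methods("GET").Path("/{object:.+}").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			vars := mux.Vars(req)
			fmt.Fprintf(w, "bucket=%s object=%s\n", vars["bucket"], vars["object"])
		})
	}
	http.ListenAndServe(":8080", router)
}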
|
||||
|
||||
@@ -62,6 +67,8 @@ func registerAPIRouter(router *mux.Router) {
|
||||
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
|
||||
// GetObjectACL - this is a dummy call.
|
||||
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
|
||||
// GetObjectTagging - this is a dummy call.
|
||||
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
|
||||
// SelectObjectContent
|
||||
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
|
||||
// GetObject
|
||||
@@ -79,8 +86,31 @@ func registerAPIRouter(router *mux.Router) {
|
||||
// GetBucketPolicy
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")
|
||||
|
||||
// Dummy Bucket Calls
|
||||
// GetBucketACL -- this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
|
||||
// GetBucketCors - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
|
||||
// GetBucketWebsiteHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
|
||||
// GetBucketVersioningHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
|
||||
// GetBucketAccelerateHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
|
||||
// GetBucketRequestPaymentHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
|
||||
// GetBucketLoggingHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
|
||||
// GetBucketLifecycleHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
|
||||
// GetBucketReplicationHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
|
||||
// GetBucketTaggingHandler - this is a dummy call.
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
|
||||
// DeleteBucketWebsiteHandler
|
||||
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
|
||||
// DeleteBucketTaggingHandler
|
||||
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")
|
||||
|
||||
// GetBucketNotification
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
|
||||
|
||||
@@ -19,6 +19,7 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
@@ -27,8 +28,11 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
|
||||
@@ -86,6 +90,7 @@ const (
|
||||
authTypeSigned
|
||||
authTypeSignedV2
|
||||
authTypeJWT
|
||||
authTypeSTS
|
||||
)
|
||||
|
||||
// Get request authentication type.
|
||||
@@ -104,6 +109,8 @@ func getRequestAuthType(r *http.Request) authType {
|
||||
return authTypeJWT
|
||||
} else if isRequestPostPolicySignatureV4(r) {
|
||||
return authTypePostPolicy
|
||||
} else if _, ok := r.URL.Query()["Action"]; ok {
|
||||
return authTypeSTS
|
||||
} else if _, ok := r.Header["Authorization"]; !ok {
|
||||
return authTypeAnonymous
|
||||
}
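The new authTypeSTS case keys off the presence of an Action query parameter, which STS calls such as AssumeRole carry and plain S3 requests do not. A toy standalone check (example URLs only):

package main

import (
	"fmt"
	"net/http"
)

// isSTSRequest reports whether the request looks like an STS API call.
func isSTSRequest(r *http.Request) bool {
	_, ok := r.URL.Query()["Action"]
	return ok
}

func main() {
	r1, _ := http.NewRequest("POST", "http://localhost:9000/?Action=AssumeRoleWithClientGrants", nil)
	r2, _ := http.NewRequest("GET", "http://localhost:9000/bucket/object", nil)
	fmt.Println(isSTSRequest(r1), isSTSRequest(r2)) // true false
}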
|
||||
@@ -112,43 +119,140 @@ func getRequestAuthType(r *http.Request) authType {
|
||||
|
||||
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
|
||||
// It does not accept presigned or JWT or anonymous requests.
|
||||
func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
|
||||
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
|
||||
s3Err := ErrAccessDenied
|
||||
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok && getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) { // we only support V4 (no presign) with auth. body
|
||||
s3Err = isReqAuthenticated(r, region)
|
||||
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
|
||||
getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
|
||||
// We only support admin credentials to access admin APIs.
|
||||
|
||||
var owner bool
|
||||
_, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
if !owner {
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
// we only support V4 (no presign) with auth body
|
||||
s3Err = isReqAuthenticated(ctx, r, region, serviceS3)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
ctx := logger.SetReqInfo(ctx, reqInfo)
|
||||
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
|
||||
}
|
||||
return s3Err
|
||||
}
|
||||
|
||||
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) APIErrorCode {
|
||||
isOwner := true
|
||||
accountName := globalServerConfig.GetCredential().AccessKey
|
||||
// Fetch the security token set by the client.
|
||||
func getSessionToken(r *http.Request) (token string) {
|
||||
token = r.Header.Get("X-Amz-Security-Token")
|
||||
if token != "" {
|
||||
return token
|
||||
}
|
||||
return r.URL.Query().Get("X-Amz-Security-Token")
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client; this never
// returns an error - on error the returned claims map is simply empty.
|
||||
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
|
||||
claims, _ := getClaimsFromToken(r)
|
||||
return claims
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client.
|
||||
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
|
||||
claims := make(map[string]interface{})
|
||||
token := getSessionToken(r)
|
||||
if token == "" {
|
||||
return claims, nil
|
||||
}
|
||||
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
|
||||
// JWT token for x-amz-security-token is signed with admin
|
||||
// secret key, temporary credentials become invalid if
|
||||
// server admin credentials change. This is done to ensure
|
||||
// that clients cannot decode the token using the temp
|
||||
// secret keys and generate an entirely new claim by essentially
|
||||
// hijacking the policies. We need to make sure that this is
|
||||
// based on an admin credential so that the token cannot be decoded
|
||||
// on the client side and is treated like an opaque value.
|
||||
return []byte(globalServerConfig.GetCredential().SecretKey), nil
|
||||
}
|
||||
p := &jwtgo.Parser{
|
||||
ValidMethods: []string{
|
||||
jwtgo.SigningMethodHS256.Alg(),
|
||||
jwtgo.SigningMethodHS512.Alg(),
|
||||
},
|
||||
}
|
||||
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !jtoken.Valid {
|
||||
return nil, errAuthentication
|
||||
}
|
||||
v, ok := claims["accessKey"]
|
||||
if !ok {
|
||||
return nil, errInvalidAccessKeyID
|
||||
}
|
||||
if _, ok = v.(string); !ok {
|
||||
return nil, errInvalidAccessKeyID
|
||||
}
|
||||
return claims, nil
|
||||
}
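The claim extraction above can be reproduced in isolation with the same dgrijalva/jwt-go APIs. In the sketch below the secret and the accessKey value are placeholders; the round trip (sign, then parse with restricted ValidMethods) only illustrates the pattern, it is not the server's actual flow.

package main

import (
	"fmt"
	"log"

	jwtgo "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("admin-secret-key") // stand-in for the server credential

	// Issue a token carrying an "accessKey" claim, the way the STS flow does.
	tok := jwtgo.NewWithClaims(jwtgo.SigningMethodHS256, jwtgo.MapClaims{"accessKey": "EXAMPLEKEY"})
	signed, err := tok.SignedString(secret)
	if err != nil {
		log.Fatal(err)
	}

	// Parse it back, accepting only the HMAC methods the server signs with.
	claims := jwtgo.MapClaims{}
	p := &jwtgo.Parser{ValidMethods: []string{
		jwtgo.SigningMethodHS256.Alg(),
		jwtgo.SigningMethodHS512.Alg(),
	}}
	parsed, err := p.ParseWithClaims(signed, claims, func(t *jwtgo.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil || !parsed.Valid {
		log.Fatal("invalid token")
	}
	fmt.Println("accessKey claim:", claims["accessKey"])
}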
|
||||
|
||||
// Fetch claims in the security token returned by the client and validate the token.
|
||||
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
|
||||
token := getSessionToken(r)
|
||||
if token != "" && cred.AccessKey == "" {
|
||||
return nil, ErrNoAccessKey
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
claims, err := getClaimsFromToken(r)
|
||||
if err != nil {
|
||||
return nil, toAPIErrorCode(context.Background(), err)
|
||||
}
|
||||
return claims, ErrNone
|
||||
}
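checkClaimsFromToken compares the request token against the credential's session token with crypto/subtle rather than a plain ==, so the comparison does not leak timing information. A tiny self-contained illustration of that call (values are examples):

package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensMatch compares two tokens in constant time.
func tokensMatch(got, want string) bool {
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}

func main() {
	fmt.Println(tokensMatch("abc123", "abc123")) // true
	fmt.Println(tokensMatch("abc123", "abc124")) // false
}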
|
||||
|
||||
// Check request auth type verifies the incoming http request
|
||||
// - validates the request signature
|
||||
// - validates the policy action if anonymous tests bucket policies if any,
|
||||
// for authenticated requests validates IAM policies.
|
||||
// returns APIErrorCode if any to be replied to the client.
|
||||
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
switch getRequestAuthType(r) {
|
||||
case authTypeUnknown:
|
||||
case authTypeUnknown, authTypeStreamingSigned:
|
||||
return ErrAccessDenied
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
if errorCode := isReqAuthenticatedV2(r); errorCode != ErrNone {
|
||||
return errorCode
|
||||
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeSigned, authTypePresigned:
|
||||
region := globalServerConfig.GetRegion()
|
||||
switch action {
|
||||
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
|
||||
region = ""
|
||||
}
|
||||
|
||||
if errorCode := isReqAuthenticated(r, region); errorCode != ErrNone {
|
||||
return errorCode
|
||||
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
default:
|
||||
isOwner = false
|
||||
accountName = ""
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
claims, s3Err := checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
// LocationConstraint is valid only for CreateBucketAction.
|
||||
@@ -174,17 +278,31 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
|
||||
}
|
||||
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: accountName,
|
||||
Action: action,
|
||||
if cred.AccessKey == "" {
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: action,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, locationConstraint, ""),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.Action(action),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, locationConstraint),
|
||||
IsOwner: isOwner,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
@@ -196,21 +314,21 @@ func isReqAuthenticatedV2(r *http.Request) (s3Error APIErrorCode) {
|
||||
return doesPresignV2SignatureMatch(r)
|
||||
}
|
||||
|
||||
func reqSignatureV4Verify(r *http.Request, region string) (s3Error APIErrorCode) {
|
||||
sha256sum := getContentSha256Cksum(r)
|
||||
func reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
|
||||
sha256sum := getContentSha256Cksum(r, stype)
|
||||
switch {
|
||||
case isRequestSignatureV4(r):
|
||||
return doesSignatureMatch(sha256sum, r, region)
|
||||
return doesSignatureMatch(sha256sum, r, region, stype)
|
||||
case isRequestPresignedSignatureV4(r):
|
||||
return doesPresignedSignatureMatch(sha256sum, r, region)
|
||||
return doesPresignedSignatureMatch(sha256sum, r, region, stype)
|
||||
default:
|
||||
return ErrAccessDenied
|
||||
}
|
||||
}
|
||||
|
||||
// Verify if request has valid AWS Signature Version '4'.
|
||||
func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
|
||||
if errCode := reqSignatureV4Verify(r, region); errCode != ErrNone {
|
||||
func isReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
|
||||
if errCode := reqSignatureV4Verify(r, region, stype); errCode != ErrNone {
|
||||
return errCode
|
||||
}
|
||||
|
||||
@@ -244,9 +362,9 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
|
||||
|
||||
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
|
||||
// The verification happens implicitly during reading.
|
||||
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256))
|
||||
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
|
||||
if err != nil {
|
||||
return toAPIErrorCode(err)
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
r.Body = ioutil.NopCloser(reader)
|
||||
return ErrNone
|
||||
@@ -288,12 +406,67 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
} else if aType == authTypeJWT {
|
||||
// Validate the Authorization header if it is a valid JWT request.
|
||||
if !isHTTPRequestValid(r) {
|
||||
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
a.handler.ServeHTTP(w, r)
|
||||
return
|
||||
} else if aType == authTypeSTS {
|
||||
a.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL)
|
||||
writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
|
||||
}
|
||||
|
||||
// isPutAllowed - check if PUT operation is allowed on the resource, this
|
||||
// call verifies bucket policies and IAM policies, supports multi user
|
||||
// checks etc.
|
||||
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
switch atype {
|
||||
case authTypeUnknown:
|
||||
return ErrAccessDenied
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
|
||||
region := globalServerConfig.GetRegion()
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
claims, s3Err := checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
if cred.AccessKey == "" {
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", ""),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -377,11 +378,13 @@ func TestIsReqAuthenticated(t *testing.T) {
|
||||
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Validates all testcases.
|
||||
for i, testCase := range testCases {
|
||||
if s3Error := isReqAuthenticated(testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
|
||||
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(err) != testCase.s3Error {
|
||||
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(err))
|
||||
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion(), serviceS3)
|
||||
if s3Error != testCase.s3Error {
|
||||
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
|
||||
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -413,8 +416,9 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
|
||||
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
|
||||
}
|
||||
ctx := context.Background()
|
||||
for i, testCase := range testCases {
|
||||
if s3Error := checkAdminRequestAuthType(testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
|
||||
if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
|
||||
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,7 +49,6 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
textData := generateBytesData(objSize)
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to written is required as input for PutObject.
|
||||
metadata := make(map[string]string)
|
||||
|
||||
md5hex := getMD5Hash(textData)
|
||||
sha256hex := ""
|
||||
@@ -61,7 +60,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -96,14 +95,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
textData := generateBytesData(objSize)
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to be written is required as input for NewMultipartUpload.
|
||||
metadata := make(map[string]string)
|
||||
opts := ObjectOptions{}
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata, opts)
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
md5hex := getMD5Hash(textData)
|
||||
sha256hex := ""
|
||||
|
||||
var textPartData []byte
|
||||
@@ -120,10 +116,10 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
} else {
|
||||
textPartData = textData[j*partSize:]
|
||||
}
|
||||
md5hex = getMD5Hash([]byte(textPartData))
|
||||
md5hex := getMD5Hash([]byte(textPartData))
|
||||
var partInfo PartInfo
|
||||
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
|
||||
mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -194,7 +190,6 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// generate etag for the generated data.
|
||||
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
|
||||
metadata := make(map[string]string)
|
||||
|
||||
// get text data generated for number of bytes equal to object size.
|
||||
md5hex := getMD5Hash(textData)
|
||||
@@ -204,7 +199,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -234,10 +229,8 @@ func getRandomByte() []byte {
|
||||
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
// seeding the random number generator.
|
||||
rand.Seed(UTCNow().UnixNano())
|
||||
var b byte
|
||||
// pick a character randomly.
|
||||
b = letterBytes[rand.Intn(len(letterBytes))]
|
||||
return []byte{b}
|
||||
return []byte{letterBytes[rand.Intn(len(letterBytes))]}
|
||||
}
|
||||
|
||||
// picks a random byte and repeats it to size bytes.
|
||||
@@ -289,7 +282,6 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
textData := generateBytesData(objSize)
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to be written is required as input for PutObject.
|
||||
metadata := make(map[string]string)
|
||||
|
||||
md5hex := getMD5Hash([]byte(textData))
|
||||
sha256hex := ""
|
||||
@@ -304,7 +296,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
for pb.Next() {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -335,7 +327,6 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
|
||||
metadata := make(map[string]string)
|
||||
|
||||
md5hex := getMD5Hash([]byte(textData))
|
||||
sha256hex := ""
|
||||
@@ -344,7 +335,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
172
cmd/bitrot-streaming.go
Normal file
@@ -0,0 +1,172 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2019 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow *io.PipeWriter
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
canClose chan struct{} // Needed to avoid race explained in Close() call.
|
||||
|
||||
// Following two fields are used only to make sure that Write(p) is called such that
|
||||
// len(p) is always the block size except for the last block, i.e. to prevent programmer errors.
|
||||
currentBlockIdx int
|
||||
verifyTillIdx int
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize {
|
||||
// All blocks except last should be of the length b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
b.h.Reset()
|
||||
b.h.Write(p)
|
||||
hashBytes := b.h.Sum(nil)
|
||||
n, err := b.iow.Write(hashBytes)
|
||||
if n != len(hashBytes) {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
n, err = b.iow.Write(p)
|
||||
b.currentBlockIdx++
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Close() error {
|
||||
err := b.iow.Close()
|
||||
// Wait for all data to be written before returning, else it causes race conditions.
// The race condition stems from the io.PipeWriter implementation, i.e. consider the following
// sequence of operations:
|
||||
// 1) pipe.Write()
|
||||
// 2) pipe.Close()
|
||||
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk
|
||||
// Hence an immediate Read() on the file can return incorrect data.
|
||||
<-b.canClose
|
||||
return err
|
||||
}
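The canClose channel exists purely to close that window. A stripped-down standalone sketch of the same pattern, with io.Copy to ioutil.Discard standing in for disk.CreateFile:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	r, w := io.Pipe()
	done := make(chan struct{})

	go func() {
		// Stand-in for disk.CreateFile: consume the whole stream.
		n, _ := io.Copy(ioutil.Discard, r)
		fmt.Println("persisted bytes:", n)
		close(done)
	}()

	w.Write([]byte("some shard data"))
	w.Close() // unblocks the reader with io.EOF
	<-done    // wait until the consumer has really finished
	fmt.Println("safe to read the file back now")
}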
|
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
|
||||
r, w := io.Pipe()
|
||||
h := algo.New()
|
||||
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)}
|
||||
go func() {
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize := bitrotSumsTotalSize + length
|
||||
err := disk.CreateFile(volume, filePath, totalFileSize, r)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
r.CloseWithError(err)
|
||||
}
|
||||
close(bw.canClose)
|
||||
}()
|
||||
return bw
|
||||
}
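The totalFileSize computation assumes one checksum is interleaved into the stream per shard. A quick standalone check of that arithmetic (ceilFrac is re-declared here only for the sketch; the sizes are arbitrary examples):

package main

import "fmt"

// ceilFrac mirrors the helper used above: ceiling of numerator/denominator.
func ceilFrac(numerator, denominator int64) int64 {
	return (numerator + denominator - 1) / denominator
}

func main() {
	const (
		length    = int64(10*1024*1024 + 123) // erasure shard file length
		shardSize = int64(1024 * 1024)        // one hash per this many bytes
		hashSize  = int64(32)                 // HighwayHash-256 sum size
	)
	bitrotSums := ceilFrac(length, shardSize) * hashSize
	fmt.Println("checksum overhead:", bitrotSums)
	fmt.Println("total file size:  ", bitrotSums+length)
}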
|
||||
|
||||
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
|
||||
type streamingBitrotReader struct {
|
||||
disk StorageAPI
|
||||
rc io.ReadCloser
|
||||
volume string
|
||||
filePath string
|
||||
tillOffset int64
|
||||
currOffset int64
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
hashBytes []byte
|
||||
}
|
||||
|
||||
func (b *streamingBitrotReader) Close() error {
|
||||
if b.rc == nil {
|
||||
return nil
|
||||
}
|
||||
return b.rc.Close()
|
||||
}
|
||||
|
||||
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
var err error
|
||||
if offset%b.shardSize != 0 {
|
||||
// Offset should always be aligned to b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
if b.rc == nil {
|
||||
// For the first ReadAt() call we need to open the stream for reading.
|
||||
b.currOffset = offset
|
||||
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
|
||||
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if offset != b.currOffset {
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
b.h.Reset()
|
||||
_, err = io.ReadFull(b.rc, b.hashBytes)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
_, err = io.ReadFull(b.rc, buf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
b.currOffset += int64(len(buf))
|
||||
return len(buf), nil
|
||||
}
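The stream layout the reader expects is hash-then-data for every shard. The following standalone sketch builds and verifies such a stream, using SHA-256 in place of the HighwayHash algorithm the real code uses:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"log"
)

func main() {
	// Build a tiny [hash][shard][hash][shard] stream.
	var stream bytes.Buffer
	shards := [][]byte{[]byte("first-shard"), []byte("second-shard")}
	for _, s := range shards {
		sum := sha256.Sum256(s)
		stream.Write(sum[:])
		stream.Write(s)
	}

	// Read it back, verifying every shard against its stored hash.
	for _, s := range shards {
		stored := make([]byte, sha256.Size)
		if _, err := io.ReadFull(&stream, stored); err != nil {
			log.Fatal(err)
		}
		data := make([]byte, len(s))
		if _, err := io.ReadFull(&stream, data); err != nil {
			log.Fatal(err)
		}
		if sum := sha256.Sum256(data); !bytes.Equal(sum[:], stored) {
			log.Fatal("bitrot detected")
		}
		fmt.Printf("shard %q ok\n", data)
	}
}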
|
||||
|
||||
// Returns streaming bitrot reader implementation.
|
||||
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
|
||||
h := algo.New()
|
||||
return &streamingBitrotReader{
|
||||
disk,
|
||||
nil,
|
||||
volume,
|
||||
filePath,
|
||||
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
|
||||
0,
|
||||
h,
|
||||
shardSize,
|
||||
make([]byte, h.Size()),
|
||||
}
|
||||
}
|
||||
109
cmd/bitrot-whole.go
Normal file
@@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2019 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// Implementation to calculate bitrot for the whole file.
|
||||
type wholeBitrotWriter struct {
|
||||
disk StorageAPI
|
||||
volume string
|
||||
filePath string
|
||||
shardSize int64 // This is the shard size of the erasure logic
|
||||
hash.Hash // For bitrot hash
|
||||
|
||||
// Following two fields are used only to make sure that Write(p) is called such that
|
||||
// len(p) is always the block size except for the last block, to prevent programmer errors.
|
||||
currentBlockIdx int
|
||||
lastBlockIdx int
|
||||
}
|
||||
|
||||
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
|
||||
if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize {
|
||||
// All blocks except last should be of the length b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
err := b.disk.AppendFile(b.volume, b.filePath, p)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
_, err = b.Hash.Write(p)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
b.currentBlockIdx++
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *wholeBitrotWriter) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns whole-file bitrot writer.
|
||||
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
|
||||
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)}
|
||||
}
|
||||
|
||||
// Implementation to verify bitrot for the whole file.
|
||||
type wholeBitrotReader struct {
|
||||
disk StorageAPI
|
||||
volume string
|
||||
filePath string
|
||||
verifier *BitrotVerifier // Holds the bit-rot info
|
||||
tillOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
|
||||
buf []byte // Holds bit-rot verified data
|
||||
}
|
||||
|
||||
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
|
||||
if b.buf == nil {
|
||||
b.buf = make([]byte, b.tillOffset-offset)
|
||||
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
|
||||
ctx := context.Background()
|
||||
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
|
||||
logger.LogIf(ctx, err)
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if len(b.buf) < len(buf) {
|
||||
logger.LogIf(context.Background(), errLessData)
|
||||
return 0, errLessData
|
||||
}
|
||||
n = copy(buf, b.buf)
|
||||
b.buf = b.buf[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Returns whole-file bitrot reader.
|
||||
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader {
|
||||
return &wholeBitrotReader{
|
||||
disk: disk,
|
||||
volume: volume,
|
||||
filePath: filePath,
|
||||
verifier: &BitrotVerifier{algo, sum},
|
||||
tillOffset: tillOffset,
|
||||
buf: nil,
|
||||
}
|
||||
}
|
||||
137
cmd/bitrot.go
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/highwayhash"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -38,19 +39,22 @@ const (
|
||||
SHA256 BitrotAlgorithm = 1 + iota
|
||||
// HighwayHash256 represents the HighwayHash-256 hash function
|
||||
HighwayHash256
|
||||
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
|
||||
HighwayHash256S
|
||||
// BLAKE2b512 represents the BLAKE2b-512 hash function
|
||||
BLAKE2b512
|
||||
)
|
||||
|
||||
// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
|
||||
const (
|
||||
DefaultBitrotAlgorithm = HighwayHash256
|
||||
DefaultBitrotAlgorithm = HighwayHash256S
|
||||
)
|
||||
|
||||
var bitrotAlgorithms = map[BitrotAlgorithm]string{
|
||||
SHA256: "sha256",
|
||||
BLAKE2b512: "blake2b",
|
||||
HighwayHash256: "highwayhash256",
|
||||
SHA256: "sha256",
|
||||
BLAKE2b512: "blake2b",
|
||||
HighwayHash256: "highwayhash256",
|
||||
HighwayHash256S: "highwayhash256S",
|
||||
}
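Both HighwayHash256 and the new streaming HighwayHash256S map onto the same highwayhash.New constructor, which only fails for a wrong key size. A standalone example with a throwaway all-zero key (MinIO uses its own fixed magic key):

package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/minio/highwayhash"
)

func main() {
	key := make([]byte, 32) // 256-bit key; New only errors on a wrong key size
	h, err := highwayhash.New(key)
	if err != nil {
		log.Fatal(err)
	}
	h.Write([]byte("shard contents"))
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}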
|
||||
|
||||
// New returns a new hash.Hash calculating the given bitrot algorithm.
|
||||
@@ -64,6 +68,9 @@ func (a BitrotAlgorithm) New() hash.Hash {
|
||||
case HighwayHash256:
|
||||
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
|
||||
return hh
|
||||
case HighwayHash256S:
|
||||
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
|
||||
return hh
|
||||
default:
|
||||
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
|
||||
return nil
|
||||
@@ -109,86 +116,72 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
|
||||
return
|
||||
}
|
||||
|
||||
// To read bit-rot verified data.
|
||||
type bitrotReader struct {
|
||||
disk StorageAPI
|
||||
volume string
|
||||
filePath string
|
||||
verifier *BitrotVerifier // Holds the bit-rot info
|
||||
endOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
|
||||
buf []byte // Holds bit-rot verified data
|
||||
}
|
||||
|
||||
// newBitrotReader returns bitrotReader.
|
||||
// Note that the buffer is allocated later in Read(). This is because we will know the buffer length only
|
||||
// during the bitrotReader.Read(). Depending on when parallelReader fails-over, the buffer length can be different.
|
||||
func newBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, endOffset int64, sum []byte) *bitrotReader {
|
||||
return &bitrotReader{
|
||||
disk: disk,
|
||||
volume: volume,
|
||||
filePath: filePath,
|
||||
verifier: &BitrotVerifier{algo, sum},
|
||||
endOffset: endOffset,
|
||||
buf: nil,
|
||||
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
if algo == HighwayHash256S {
|
||||
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
|
||||
}
|
||||
return newWholeBitrotWriter(disk, volume, filePath, length, algo, shardSize)
|
||||
}
|
||||
|
||||
// ReadChunk returns requested data.
|
||||
func (b *bitrotReader) ReadChunk(offset int64, length int64) ([]byte, error) {
|
||||
if b.buf == nil {
|
||||
b.buf = make([]byte, b.endOffset-offset)
|
||||
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
|
||||
ctx := context.Background()
|
||||
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
|
||||
if algo == HighwayHash256S {
|
||||
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
|
||||
}
|
||||
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
|
||||
}
|
||||
|
||||
// Close all the readers.
|
||||
func closeBitrotReaders(rs []io.ReaderAt) {
|
||||
for _, r := range rs {
|
||||
if br, ok := r.(*streamingBitrotReader); ok {
|
||||
br.Close()
|
||||
}
|
||||
}
|
||||
if int64(len(b.buf)) < length {
|
||||
logger.LogIf(context.Background(), errLessData)
|
||||
return nil, errLessData
|
||||
}
|
||||
retBuf := b.buf[:length]
|
||||
b.buf = b.buf[length:]
|
||||
return retBuf, nil
|
||||
}
|
||||
|
||||
// To calculate the bit-rot of the written data.
|
||||
type bitrotWriter struct {
|
||||
disk StorageAPI
|
||||
volume string
|
||||
filePath string
|
||||
h hash.Hash
|
||||
}
|
||||
|
||||
// newBitrotWriter returns bitrotWriter.
|
||||
func newBitrotWriter(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm) *bitrotWriter {
|
||||
return &bitrotWriter{
|
||||
disk: disk,
|
||||
volume: volume,
|
||||
filePath: filePath,
|
||||
h: algo.New(),
|
||||
// Close all the writers.
|
||||
func closeBitrotWriters(ws []io.Writer) {
|
||||
for _, w := range ws {
|
||||
if bw, ok := w.(*streamingBitrotWriter); ok {
|
||||
bw.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Append appends the data while calculating the hash.
|
||||
func (b *bitrotWriter) Append(buf []byte) error {
|
||||
n, err := b.h.Write(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != len(buf) {
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return errUnexpected
|
||||
}
|
||||
if err = b.disk.AppendFile(b.volume, b.filePath, buf); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
|
||||
func bitrotWriterSum(w io.Writer) []byte {
|
||||
if bw, ok := w.(*wholeBitrotWriter); ok {
|
||||
return bw.Sum(nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sum returns bit-rot sum.
|
||||
func (b *bitrotWriter) Sum() []byte {
|
||||
return b.h.Sum(nil)
|
||||
// Verify if a file has bitrot error.
|
||||
func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
|
||||
if algo != HighwayHash256S {
|
||||
buf := []byte{}
|
||||
// For whole-file bitrot we don't need to read the entire file, as the bitrot verification happens on the server side even if we read 0 bytes.
|
||||
_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
|
||||
return err
|
||||
}
|
||||
buf := make([]byte, shardSize)
|
||||
r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
|
||||
defer closeBitrotReaders([]io.ReaderAt{r})
|
||||
var offset int64
|
||||
for {
|
||||
if offset == tillOffset {
|
||||
break
|
||||
}
|
||||
var n int
|
||||
tmpBuf := buf
|
||||
if int64(len(tmpBuf)) > (tillOffset - offset) {
|
||||
tmpBuf = tmpBuf[:(tillOffset - offset)]
|
||||
}
|
||||
n, err = r.ReadAt(tmpBuf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
offset += int64(n)
|
||||
}
|
||||
return nil
|
||||
}
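bitrotCheckFile walks the stream shard by shard until tillOffset. A generic version of that loop over a plain io.ReaderAt (bytes.Reader here stands in for the streaming bitrot reader, so no actual hashing happens in this sketch):

package main

import (
	"bytes"
	"fmt"
	"log"
)

func main() {
	data := bytes.Repeat([]byte("a"), 35) // pretend shard file contents
	r := bytes.NewReader(data)            // stands in for a streamingBitrotReader
	const shardSize = 10
	tillOffset := int64(len(data))

	buf := make([]byte, shardSize)
	var offset int64
	for offset < tillOffset {
		tmp := buf
		if int64(len(tmp)) > tillOffset-offset {
			tmp = tmp[:tillOffset-offset] // shorter final shard
		}
		n, err := r.ReadAt(tmp, offset)
		if err != nil {
			log.Fatal(err)
		}
		offset += int64(n)
	}
	fmt.Println("verified bytes:", offset)
}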
|
||||
|
||||
@@ -17,13 +17,14 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBitrotReaderWriter(t *testing.T) {
|
||||
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
|
||||
tmpDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -40,32 +41,44 @@ func TestBitrotReaderWriter(t *testing.T) {
|
||||
|
||||
disk.MakeVol(volume)
|
||||
|
||||
writer := newBitrotWriter(disk, volume, filePath, HighwayHash256)
|
||||
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
|
||||
|
||||
err = writer.Append([]byte("aaaaaaaaa"))
|
||||
_, err = writer.Write([]byte("aaaaaaaaaa"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = writer.Append([]byte("a"))
|
||||
_, err = writer.Write([]byte("aaaaaaaaaa"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = writer.Append([]byte("aaaaaaaaaa"))
|
||||
_, err = writer.Write([]byte("aaaaaaaaaa"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = writer.Append([]byte("aaaaa"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = writer.Append([]byte("aaaaaaaaaa"))
|
||||
_, err = writer.Write([]byte("aaaaa"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
writer.(io.Closer).Close()
|
||||
|
||||
reader := newBitrotReader(disk, volume, filePath, HighwayHash256, 35, writer.Sum())
|
||||
|
||||
if _, err = reader.ReadChunk(0, 35); err != nil {
|
||||
reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
|
||||
b := make([]byte, 10)
|
||||
if _, err = reader.ReadAt(b, 0); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err = reader.ReadAt(b, 10); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err = reader.ReadAt(b, 20); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err = reader.ReadAt(b[:5], 30); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllBitrotAlgorithms(t *testing.T) {
|
||||
for bitrotAlgo := range bitrotAlgorithms {
|
||||
testBitrotReaderWriterAlgo(t, bitrotAlgo)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,9 +18,11 @@ package cmd
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
@@ -31,12 +33,19 @@ import (
|
||||
// - delimiter if set should be equal to '/', otherwise the request is rejected.
|
||||
// - marker if set should have a common prefix with 'prefix' param, otherwise
|
||||
// the request is rejected.
|
||||
func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIErrorCode {
|
||||
func validateListObjectsArgs(prefix, marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
|
||||
// Max keys cannot be negative.
|
||||
if maxKeys < 0 {
|
||||
return ErrInvalidMaxKeys
|
||||
}
|
||||
|
||||
if encodingType != "" {
|
||||
// Only the "url" encoding type is supported
|
||||
if strings.ToLower(encodingType) != "url" {
|
||||
return ErrInvalidEncodingMethod
|
||||
}
|
||||
}
|
||||
|
||||
/// Minio special conditions for ListObjects.
|
||||
|
||||
// Verify if delimiter is anything other than '/', which we do not support.
|
||||
@@ -58,36 +67,38 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
|
||||
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListObjectsV2")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
urlValues := r.URL.Query()
|
||||
|
||||
// Extract all the listObjectsV2 query params to their native values.
|
||||
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _, errCode := getListObjectsV2Args(urlValues)
|
||||
|
||||
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
|
||||
if errCode != ErrNone {
|
||||
writeErrorResponse(w, errCode, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate the query params before beginning to serve the request.
|
||||
// fetch-owner is not validated since it is a boolean
|
||||
if s3Error := validateListObjectsArgs(prefix, token, delimiter, maxKeys); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
if s3Error := validateListObjectsArgs(prefix, token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
listObjectsV2 := objectAPI.ListObjectsV2
|
||||
if api.CacheAPI() != nil {
|
||||
listObjectsV2 = api.CacheAPI().ListObjectsV2
|
||||
@@ -97,22 +108,33 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
|
||||
// marshaled into S3 compatible XML header.
|
||||
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
for i := range listObjectsV2Info.Objects {
|
||||
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
|
||||
var actualSize int64
|
||||
if listObjectsV2Info.Objects[i].IsCompressed() {
|
||||
// Read the decompressed size from the meta.json.
|
||||
actualSize = listObjectsV2Info.Objects[i].GetActualSize()
|
||||
if actualSize < 0 {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// Set the info.Size to the actualSize.
|
||||
listObjectsV2Info.Objects[i].Size = actualSize
|
||||
} else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
|
||||
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
|
||||
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
|
||||
delimiter, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
|
||||
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseXML(w, encodeResponse(response))
|
||||
@@ -127,57 +149,70 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
|
||||
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListObjectsV1")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Extract all the listObjectsV1 query params to their native values.
|
||||
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())
|
||||
|
||||
// Validate the maxKeys lowerbound. When maxKeys > 1000, S3 returns 1000 but
|
||||
// does not throw an error.
|
||||
if maxKeys < 0 {
|
||||
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
|
||||
return
|
||||
} // Validate all the query params before beginning to serve the request.
|
||||
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
|
||||
if s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate all the query params before beginning to serve the request.
|
||||
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
listObjects := objectAPI.ListObjects
|
||||
if api.CacheAPI() != nil {
|
||||
listObjects = api.CacheAPI().ListObjects
|
||||
}
|
||||
|
||||
// Initiate a list objects operation based on the input params.
|
||||
// On success would return back ListObjectsInfo object to be
|
||||
// marshaled into S3 compatible XML header.
|
||||
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
for i := range listObjectsInfo.Objects {
|
||||
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
|
||||
var actualSize int64
|
||||
if listObjectsInfo.Objects[i].IsCompressed() {
|
||||
// Read the decompressed size from the meta.json.
|
||||
actualSize = listObjectsInfo.Objects[i].GetActualSize()
|
||||
if actualSize < 0 {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// Set the info.Size to the actualSize.
|
||||
listObjectsInfo.Objects[i].Size = actualSize
|
||||
} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
|
||||
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
|
||||
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)
|
||||
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseXML(w, encodeResponse(response))
|
||||
|
||||
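ListObjectsV1Handler now threads the client's encoding-type query parameter (S3's encoding-type=url, which asks the server to URL-encode object keys in the XML response) from argument parsing through validation into the response generator. A condensed sketch of the new flow, using only names from the hunk above, with error handling trimmed:

// Condensed sketch, not the literal handler body; s3Error values are
// checked via writeErrorResponse as shown above.
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
_ = s3Error
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
_ = err
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
writeSuccessResponseXML(w, encodeResponse(response))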
@@ -89,17 +89,19 @@ func initFederatorBackend(objLayer ObjectLayer) {
|
||||
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketLocation")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -108,7 +110,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
|
||||
getBucketInfo = api.CacheAPI().GetBucketInfo
|
||||
}
|
||||
if _, err := getBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -137,40 +139,48 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
|
||||
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListMultipartUploads")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
|
||||
if maxUploads < 0 {
|
||||
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
|
||||
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.URL.Query())
|
||||
if errCode != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if maxUploads < 0 {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxUploads), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if keyMarker != "" {
|
||||
// Marker not common with prefix is not implemented.
|
||||
if !hasPrefix(keyMarker, prefix) {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// generate response
|
||||
response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo)
|
||||
response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo, encodingType)
|
||||
encodedSuccessResponse := encodeResponse(response)
|
||||
|
||||
// write success response.
|
||||
@@ -184,27 +194,30 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
|
||||
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListBuckets")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
listBuckets := objectAPI.ListBuckets
|
||||
|
||||
listBuckets := objectAPI.ListBuckets
|
||||
if api.CacheAPI() != nil {
|
||||
listBuckets = api.CacheAPI().ListBuckets
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// If etcd, dns federation configured list buckets from etcd.
|
||||
var bucketsInfo []BucketInfo
|
||||
if globalDNSConfig != nil {
|
||||
dnsBuckets, err := globalDNSConfig.List()
|
||||
if err != nil && err != dns.ErrNoEntriesFound {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
bucketSet := set.NewStringSet()
|
||||
@@ -223,7 +236,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
|
||||
var err error
|
||||
bucketsInfo, err = listBuckets(ctx)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -240,12 +253,14 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
|
||||
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "DeleteMultipleObjects")
|
||||
|
||||
defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -254,7 +269,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
// In the event access is denied, a 200 response should still be returned
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||
if s3Error != ErrAccessDenied {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -262,14 +277,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
// Content-Length is required and should be non-zero
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Content-Md5 is required and should be set
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||
if _, ok := r.Header["Content-Md5"]; !ok {
|
||||
writeErrorResponse(w, ErrMissingContentMD5, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -285,7 +300,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
// Read incoming body XML bytes.
|
||||
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, ErrInternalError, r.URL)
|
||||
writeErrorResponse(ctx, w, toAdminAPIErr(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -293,7 +308,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
deleteObjects := &DeleteObjectsRequest{}
|
||||
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, ErrMalformedXML, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -301,7 +316,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
if globalWORMEnabled {
|
||||
// Not required to check whether given objects exist or not, because
|
||||
// DeleteMultipleObject is always successful irrespective of object existence.
|
||||
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -340,10 +355,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
deletedObjects = append(deletedObjects, object)
|
||||
continue
|
||||
}
|
||||
apiErr := toAPIError(ctx, err)
|
||||
// Error during delete should be collected separately.
|
||||
deleteErrors = append(deleteErrors, DeleteError{
|
||||
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
|
||||
Message: errorCodeResponse[toAPIErrorCode(err)].Description,
|
||||
Code: apiErr.Code,
|
||||
Message: apiErr.Description,
|
||||
Key: object.ObjectName,
|
||||
})
|
||||
}
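In the multi-delete loop the per-object failure is now converted once with toAPIError(ctx, err) and its Code and Description are reused, instead of resolving the error code twice through errorCodeResponse. A small sketch of that shape (the loop and helper are illustrative; DeleteError, deletedObjects and deleteErrors are the names used above):

for _, object := range objectsToDelete { // hypothetical slice name
	err := deleteObject(ctx, bucket, object.ObjectName) // hypothetical helper
	if err == nil {
		deletedObjects = append(deletedObjects, object)
		continue
	}
	apiErr := toAPIError(ctx, err) // single conversion, reused below
	deleteErrors = append(deleteErrors, DeleteError{
		Code:    apiErr.Code,
		Message: apiErr.Description,
		Key:     object.ObjectName,
	})
}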
|
||||
@@ -370,10 +386,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
Object: ObjectInfo{
|
||||
Name: dobj.ObjectName,
|
||||
},
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
ReqParams: extractReqParams(r),
|
||||
RespElements: extractRespElements(w),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -384,9 +401,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutBucket")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -394,21 +413,21 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
bucket := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Parse incoming location constraint.
|
||||
location, s3Error := parseLocationConstraint(r)
|
||||
if s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if location sent by the client is valid, reject
|
||||
// requests which do not follow valid region requirements.
|
||||
if !isValidLocation(location) {
|
||||
writeErrorResponse(w, ErrInvalidRegion, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRegion), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -417,33 +436,33 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
if err == dns.ErrNoEntriesFound {
|
||||
// Proceed to creating a bucket.
|
||||
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
if err = globalDNSConfig.Put(bucket); err != nil {
|
||||
objectAPI.DeleteBucket(ctx, bucket)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure to add Location information here only for bucket
|
||||
w.Header().Set("Location", getObjectLocation(r, globalDomainName, bucket, ""))
|
||||
w.Header().Set("Location", getObjectLocation(r, globalDomainNames, bucket, ""))
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
return
|
||||
}
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
|
||||
}
|
||||
writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBucketAlreadyOwnedByYou), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Proceed to creating a bucket.
|
||||
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -460,9 +479,21 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PostPolicyBucket")
|
||||
|
||||
defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -471,17 +502,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
// Require Content-Length to be set in the request
|
||||
size := r.ContentLength
|
||||
if size < 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
resource, err := getResource(r.URL.Path, r.Host, globalDomainName)
|
||||
resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrInvalidRequest, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// Make sure that the URL does not contain object name.
|
||||
if bucket != filepath.Clean(resource[1:]) {
|
||||
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -490,7 +521,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
reader, err := r.MultipartReader()
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -498,7 +529,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
form, err := reader.ReadForm(maxFormMemory)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -509,13 +540,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if file is provided, error out otherwise.
|
||||
if fileBody == nil {
|
||||
writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPOSTFileRequired), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -537,66 +568,83 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
if successRedirect != "" {
|
||||
redirectURL, err = url.Parse(successRedirect)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Verify policy signature.
|
||||
apiErr := doesPolicySignatureMatch(formValues)
|
||||
if apiErr != ErrNone {
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
errCode := doesPolicySignatureMatch(formValues)
|
||||
if errCode != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure formValues adhere to policy restrictions.
|
||||
if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that the object size is within expected range, also the file size
|
||||
// should not exceed the maximum single Put size (5 GiB)
|
||||
lengthRange := postPolicyForm.Conditions.ContentLengthRange
|
||||
if lengthRange.Valid {
|
||||
if fileSize < lengthRange.Min {
|
||||
writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
|
||||
// Handle policy if it is set.
|
||||
if len(policyBytes) > 0 {
|
||||
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
|
||||
writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
|
||||
// Make sure formValues adhere to policy restrictions.
|
||||
if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
|
||||
writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that the object size is within expected range, also the file size
|
||||
// should not exceed the maximum single Put size (5 GiB)
|
||||
lengthRange := postPolicyForm.Conditions.ContentLengthRange
|
||||
if lengthRange.Valid {
|
||||
if fileSize < lengthRange.Min {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooSmall), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooLarge), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract metadata to be saved from received Form.
|
||||
metadata := make(map[string]string)
|
||||
err = extractMetadataFromMap(ctx, formValues, metadata)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrInternalError, r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
|
||||
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
rawReader := hashReader
|
||||
pReader := NewPutObjReader(rawReader, nil, nil)
|
||||
var objectEncryptionKey []byte
|
||||
|
||||
// This request header needs to be set prior to setting ObjectOptions
|
||||
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
}
|
||||
// get gateway encryption options
|
||||
var opts ObjectOptions
|
||||
opts, err = putOpts(ctx, r, bucket, object, metadata)
|
||||
if err != nil {
|
||||
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
|
||||
return
|
||||
}
|
||||
if objectAPI.IsEncryptionSupported() {
|
||||
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
|
||||
var reader io.Reader
|
||||
@@ -604,31 +652,32 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
if crypto.SSEC.IsRequested(formValues) {
|
||||
key, err = ParseSSECustomerHeader(formValues)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
reader, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
|
||||
reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
info := ObjectInfo{Size: fileSize}
|
||||
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
|
||||
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
|
||||
}
|
||||
}
|
||||
|
||||
objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata, ObjectOptions{})
|
||||
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
location := getObjectLocation(r, globalDomainName, bucket, object)
|
||||
location := getObjectLocation(r, globalDomainNames, bucket, object)
|
||||
w.Header().Set("ETag", `"`+objInfo.ETag+`"`)
|
||||
w.Header().Set("Location", location)
|
||||
|
||||
@@ -640,13 +689,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
|
||||
// Notify object created event.
|
||||
defer sendEvent(eventArgs{
|
||||
EventName: event.ObjectCreatedPost,
|
||||
BucketName: objInfo.Bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
EventName: event.ObjectCreatedPost,
|
||||
BucketName: objInfo.Bucket,
|
||||
Object: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
RespElements: extractRespElements(w),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
})
|
||||
|
||||
if successRedirect != "" {
|
||||
@@ -682,17 +732,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "HeadBucket")
|
||||
|
||||
defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
|
||||
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponseHeadersOnly(w, s3Error)
|
||||
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -701,7 +753,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
|
||||
getBucketInfo = api.CacheAPI().GetBucketInfo
|
||||
}
|
||||
if _, err := getBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
|
||||
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -712,17 +764,19 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
|
||||
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "DeleteBucket")
|
||||
|
||||
defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -732,7 +786,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
// Attempt to delete bucket.
|
||||
if err := deleteBucket(ctx, bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -744,7 +798,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
||||
if err := globalDNSConfig.Delete(bucket); err != nil {
|
||||
// Deleting DNS entry failed, attempt to create the bucket again.
|
||||
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,12 +95,12 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
|
||||
}
|
||||
if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
|
||||
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), string(rec.Body.Bytes()))
|
||||
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String())
|
||||
}
|
||||
errorResponse := APIErrorResponse{}
|
||||
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
|
||||
if err != nil && !testCase.shouldPass {
|
||||
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(rec.Body.Bytes()))
|
||||
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
|
||||
}
|
||||
if errorResponse.Resource != testCase.errorResponse.Resource {
|
||||
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
|
||||
@@ -131,7 +131,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
errorResponse = APIErrorResponse{}
|
||||
err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse)
|
||||
if err != nil && !testCase.shouldPass {
|
||||
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(recV2.Body.Bytes()))
|
||||
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String())
|
||||
}
|
||||
if errorResponse.Resource != testCase.errorResponse.Resource {
|
||||
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
|
||||
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
for i := 0; i < 10; i++ {
|
||||
objectName := "test-object-" + strconv.Itoa(i)
|
||||
// uploading the object.
|
||||
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
|
||||
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
|
||||
@@ -645,8 +645,8 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) {
|
||||
for _, obj := range objects {
|
||||
deleteErrorList = append(deleteErrorList, DeleteError{
|
||||
Code: errorCodeResponse[ErrAccessDenied].Code,
|
||||
Message: errorCodeResponse[ErrAccessDenied].Description,
|
||||
Code: errorCodes[ErrAccessDenied].Code,
|
||||
Message: errorCodes[ErrAccessDenied].Description,
|
||||
Key: obj.ObjectName,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -44,28 +44,30 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck
|
||||
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketNotification")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !objAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucketName)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -74,17 +76,21 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
||||
if err != nil {
|
||||
// Ignore errNoSuchNotifications to comply with AWS S3.
|
||||
if err != errNoSuchNotifications {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
nConfig = &event.Config{}
|
||||
}
|
||||
|
||||
// If xml namespace is empty, set a default value before returning.
|
||||
if nConfig.XMLNS == "" {
|
||||
nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
}
|
||||
|
||||
notificationBytes, err := xml.Marshal(nConfig)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -96,14 +102,16 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
||||
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutBucketNotification")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !objectAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -111,36 +119,36 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
_, err := objectAPI.GetBucketInfo(ctx, bucketName)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// PutBucketNotification always needs a Content-Length.
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
var config *event.Config
|
||||
config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerConfig.GetRegion(), globalNotificationSys.targetList)
|
||||
if err != nil {
|
||||
apiErr := ErrMalformedXML
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
if event.IsEventError(err) {
|
||||
apiErr = toAPIErrorCode(err)
|
||||
apiErr = toAPIError(ctx, err)
|
||||
}
|
||||
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -156,22 +164,29 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
||||
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListenBucketNotification")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))
|
||||
|
||||
// Validate if bucket exists.
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
if !objAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !objAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !objAPI.IsListenBucketSupported() {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.ListenBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -179,11 +194,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
|
||||
var prefix string
|
||||
if len(values["prefix"]) > 1 {
|
||||
writeErrorResponse(w, ErrFilterNamePrefix, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if len(values["prefix"]) == 1 {
|
||||
if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -192,11 +209,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
|
||||
var suffix string
|
||||
if len(values["suffix"]) > 1 {
|
||||
writeErrorResponse(w, ErrFilterNameSuffix, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if len(values["suffix"]) == 1 {
|
||||
if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -209,7 +228,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
for _, s := range values["events"] {
|
||||
eventName, err := event.ParseName(s)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -217,19 +236,19 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
}
|
||||
|
||||
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
host, err := xnet.ParseHost(r.RemoteAddr)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
target, err := target.NewHTTPClientTarget(*host, w)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -237,8 +256,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
|
||||
if err = globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
|
||||
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
|
||||
@@ -246,14 +264,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
|
||||
thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
|
||||
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -263,8 +280,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
|
||||
if err = RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
|
||||
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,9 +40,11 @@ const (
|
||||
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutBucketPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -50,43 +52,43 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
|
||||
bucket := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Error out if Content-Length is missing.
|
||||
// PutBucketPolicy always needs Content-Length.
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Error out if Content-Length is beyond allowed size.
|
||||
if r.ContentLength > maxBucketPolicySize {
|
||||
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Version in policy must not be empty
|
||||
if bucketPolicy.Version == "" {
|
||||
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -101,9 +103,11 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
|
||||
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "DeleteBucketPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -111,18 +115,18 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
|
||||
bucket := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -137,9 +141,11 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
|
||||
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -147,27 +153,26 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
|
||||
bucket := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Read bucket access policy.
|
||||
bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
policyData, err := json.Marshal(bucketPolicy)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
cmd/certs.go
@@ -68,24 +68,6 @@ func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
|
||||
}
|
||||
|
||||
func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
|
||||
// Get all CA file names.
|
||||
var caFiles []string
|
||||
fis, err := readDir(certsCAsDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, fi := range fis {
|
||||
// Skip all directories.
|
||||
if hasSuffix(fi, slashSeparator) {
|
||||
continue
|
||||
}
|
||||
// We are only interested in regular files here.
|
||||
caFiles = append(caFiles, pathJoin(certsCAsDir, fi))
|
||||
}
|
||||
if len(caFiles) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rootCAs, _ := x509.SystemCertPool()
|
||||
if rootCAs == nil {
|
||||
// In some systems (like Windows) system cert pool is
|
||||
@@ -94,16 +76,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
|
||||
rootCAs = x509.NewCertPool()
|
||||
}
|
||||
|
||||
// Load custom root CAs for client requests
|
||||
for _, caFile := range caFiles {
|
||||
caCert, err := ioutil.ReadFile(caFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
fis, err := readDir(certsCAsDir)
|
||||
if err != nil {
|
||||
if err == errFileNotFound {
|
||||
err = nil // Return success if CA's directory is missing.
|
||||
}
|
||||
|
||||
rootCAs.AppendCertsFromPEM(caCert)
|
||||
return rootCAs, err
|
||||
}
|
||||
|
||||
// Load all custom CA files.
|
||||
for _, fi := range fis {
|
||||
// Skip all directories.
|
||||
if hasSuffix(fi, slashSeparator) {
|
||||
continue
|
||||
}
|
||||
caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
|
||||
if err != nil {
|
||||
return rootCAs, err
|
||||
}
|
||||
rootCAs.AppendCertsFromPEM(caCert)
|
||||
}
|
||||
return rootCAs, nil
|
||||
}
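The rewritten getRootCAs no longer fails when the CAs directory is absent: a missing directory yields the system pool (or a fresh empty pool on platforms without one) with a nil error, and a failed read of an individual CA file returns the partially populated pool together with that error. The updated TestGetRootCAs case further down reflects the first point; an illustrative use:

// Illustrative only; the path is a placeholder.
rootCAs, err := getRootCAs("/nonexistent/certs/CAs")
if err != nil {
	// reached only for real read errors, not for a missing directory
}
_ = rootCAs // still usable: at minimum the system pool or an empty pool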
|
||||
|
||||
@@ -150,24 +142,20 @@ func loadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
	return cert, nil
}

func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, c *certs.Certs, secureConn bool, err error) {
func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
	if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
		return nil, nil, nil, false, nil
		return nil, nil, false, nil
	}

	if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil {
		return nil, nil, nil, false, err
		return nil, nil, false, err
	}

	c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), loadX509KeyPair)
	if err != nil {
		return nil, nil, nil, false, err
	}

	if rootCAs, err = getRootCAs(getCADir()); err != nil {
		return nil, nil, nil, false, err
		return nil, nil, false, err
	}

	secureConn = true
	return x509Certs, rootCAs, c, secureConn, nil
	return x509Certs, c, secureConn, nil
}
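With rootCAs dropped from the return values, callers of the renamed getTLSConfig unpack one value fewer. A hypothetical caller, shown only to illustrate the new signature (the variable names are not taken from this diff):

publicCerts, tlsCerts, secureConn, err := getTLSConfig() // assumed caller; names illustrative
if err != nil {
	// handle TLS configuration error
}
_, _, _ = publicCerts, tlsCerts, secureConn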
@@ -223,7 +223,8 @@ func TestGetRootCAs(t *testing.T) {
		certCAsDir  string
		expectedErr error
	}{
		{"nonexistent-dir", errFileNotFound},
		// ignores non-existent directories.
		{"nonexistent-dir", nil},
		// Ignores directories.
		{dir1, nil},
		// Ignore empty directory.

@@ -17,6 +17,7 @@
package cmd

import (
	"crypto/tls"
	"errors"
	"net"
	"os"
@@ -28,12 +29,13 @@ import (
	etcd "github.com/coreos/etcd/clientv3"
	dns2 "github.com/miekg/dns"
	"github.com/minio/cli"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio-go/pkg/set"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/target/console"
	"github.com/minio/minio/cmd/logger/target/http"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"

	"github.com/minio/minio-go/pkg/set"
	xnet "github.com/minio/minio/pkg/net"
)

// Check for updates and print a notification message
|
||||
@@ -50,53 +52,129 @@ func checkUpdate(mode string) {
|
||||
|
||||
// Load logger targets based on user's configuration
|
||||
func loadLoggers() {
|
||||
if globalServerConfig.Logger.Console.Enabled {
|
||||
// Enable console logging
|
||||
logger.AddTarget(logger.NewConsole())
|
||||
auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
|
||||
if ok {
|
||||
// Enable audit HTTP logging through ENV.
|
||||
logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
|
||||
}
|
||||
for _, l := range globalServerConfig.Logger.HTTP {
|
||||
if l.Enabled {
|
||||
// Enable http logging
|
||||
logger.AddTarget(logger.NewHTTP(l.Endpoint, NewCustomHTTPTransport()))
|
||||
|
||||
loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
|
||||
if ok {
|
||||
// Enable HTTP logging through ENV.
|
||||
logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
|
||||
} else {
|
||||
for _, l := range globalServerConfig.Logger.HTTP {
|
||||
if l.Enabled {
|
||||
// Enable http logging
|
||||
logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if globalServerConfig.Logger.Console.Enabled {
|
||||
// Enable console logging
|
||||
logger.AddTarget(console.New())
|
||||
}
|
||||
|
||||
}
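// Note on loadLoggers above (sketch, not part of the diff): the function now
// prefers environment variables over the config file. Setting
// MINIO_LOGGER_HTTP_ENDPOINT replaces the configured HTTP targets entirely,
// and MINIO_AUDIT_LOGGER_HTTP_ENDPOINT adds an audit target that has no
// config-file equivalent. The endpoints below are placeholders.
os.Setenv("MINIO_LOGGER_HTTP_ENDPOINT", "http://127.0.0.1:8080/minio/logs")
os.Setenv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT", "http://127.0.0.1:8080/minio/audit")
loadLoggers()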
|
||||
|
||||
func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
|
||||
var dir string
|
||||
var dirSet bool
|
||||
|
||||
switch {
|
||||
case ctx.IsSet(option):
|
||||
dir = ctx.String(option)
|
||||
dirSet = true
|
||||
case ctx.GlobalIsSet(option):
|
||||
dir = ctx.GlobalString(option)
|
||||
dirSet = true
|
||||
// cli package does not expose the parent's option. Below code is a workaround.
|
||||
if dir == "" || dir == getDefaultDir() {
|
||||
dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
|
||||
if ctx.Parent().GlobalIsSet(option) {
|
||||
dir = ctx.Parent().GlobalString(option)
|
||||
dirSet = true
|
||||
}
|
||||
}
|
||||
default:
|
||||
// Neither local nor global option is provided. In this case, try to use
|
||||
// default directory.
|
||||
dir = getDefaultDir()
|
||||
if dir == "" {
|
||||
logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
|
||||
}
|
||||
}
|
||||
|
||||
if dir == "" {
|
||||
logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
|
||||
}
|
||||
|
||||
// Disallow relative paths, figure out absolute paths.
|
||||
dirAbs, err := filepath.Abs(dir)
|
||||
logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
|
||||
|
||||
logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
|
||||
|
||||
return &ConfigDir{path: dirAbs}, dirSet
|
||||
}
|
||||
|
||||
func handleCommonCmdArgs(ctx *cli.Context) {
|
||||
|
||||
var configDir string
|
||||
|
||||
switch {
|
||||
case ctx.IsSet("config-dir"):
|
||||
configDir = ctx.String("config-dir")
|
||||
case ctx.GlobalIsSet("config-dir"):
|
||||
configDir = ctx.GlobalString("config-dir")
|
||||
// cli package does not expose parent's "config-dir" option. Below code is workaround.
|
||||
if configDir == "" || configDir == getConfigDir() {
|
||||
if ctx.Parent().GlobalIsSet("config-dir") {
|
||||
configDir = ctx.Parent().GlobalString("config-dir")
|
||||
}
|
||||
}
|
||||
default:
|
||||
// Neither local nor global config-dir option is provided. In this case, try to use
|
||||
// default config directory.
|
||||
configDir = getConfigDir()
|
||||
if configDir == "" {
|
||||
logger.FatalIf(errors.New("missing option"), "config-dir option must be provided")
|
||||
}
|
||||
// Get "json" flag from command line argument and
|
||||
// enable json and quiet modes if json flag is turned on.
|
||||
globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
|
||||
if globalCLIContext.JSON {
|
||||
logger.EnableJSON()
|
||||
}
|
||||
|
||||
if configDir == "" {
|
||||
logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty")
|
||||
// Get quiet flag from command line argument.
|
||||
globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
|
||||
if globalCLIContext.Quiet {
|
||||
logger.EnableQuiet()
|
||||
}
|
||||
|
||||
// Disallow relative paths, figure out absolute paths.
|
||||
configDirAbs, err := filepath.Abs(configDir)
|
||||
logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
|
||||
setConfigDir(configDirAbs)
|
||||
// Get anonymous flag from command line argument.
|
||||
globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
|
||||
if globalCLIContext.Anonymous {
|
||||
logger.EnableAnonymous()
|
||||
}
|
||||
|
||||
// Fetch address option
|
||||
globalCLIContext.Addr = ctx.GlobalString("address")
|
||||
if globalCLIContext.Addr == "" || globalCLIContext.Addr == ":"+globalMinioDefaultPort {
|
||||
globalCLIContext.Addr = ctx.String("address")
|
||||
}
|
||||
|
||||
// Set all config, certs and CAs directories.
|
||||
var configSet, certsSet bool
|
||||
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
|
||||
globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
|
||||
|
||||
// Remove this code when we deprecate and remove config-dir.
|
||||
// This code is to make sure we inherit from the config-dir
|
||||
// option if certs-dir is not provided.
|
||||
if !certsSet && configSet {
|
||||
globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
|
||||
}
|
||||
|
||||
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
|
||||
|
||||
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
|
||||
}
|
||||
|
||||
// Parses the given compression include list `extensions` or `content-types`.
|
||||
func parseCompressIncludes(includes []string) ([]string, error) {
|
||||
for _, e := range includes {
|
||||
if len(e) == 0 {
|
||||
return nil, uiErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type (%s) cannot be empty", e)
|
||||
}
|
||||
}
|
||||
return includes, nil
|
||||
}
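For reference, a minimal standalone sketch of how the comma-delimited MINIO_COMPRESS_EXTENSIONS value is split and validated, mirroring parseCompressIncludes above. The fmt.Errorf call and the sample env value are stand-ins for the internal uiErr helper and a real environment variable, so treat them as assumptions:

package main

import (
	"fmt"
	"strings"
)

// parseCompressIncludes rejects empty entries, as in the diff above.
func parseCompressIncludes(includes []string) ([]string, error) {
	for _, e := range includes {
		if len(e) == 0 {
			return nil, fmt.Errorf("extension/mime-type (%s) cannot be empty", e)
		}
	}
	return includes, nil
}

func main() {
	env := ".txt,.log,.csv" // e.g. the value exported as MINIO_COMPRESS_EXTENSIONS
	exts, err := parseCompressIncludes(strings.Split(env, ","))
	fmt.Println(exts, err)
}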
|
||||
|
||||
func handleCommonEnvVars() {
|
||||
compressEnvDelimiter := ","
|
||||
// Start profiler if env is set.
|
||||
if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
|
||||
var err error
|
||||
@@ -111,6 +189,7 @@ func handleCommonEnvVars() {
|
||||
if err != nil {
|
||||
logger.Fatal(uiErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment")
|
||||
}
|
||||
cred.Expiration = timeSentinel
|
||||
|
||||
// credential Envs are set globally.
|
||||
globalIsEnvCreds = true
|
||||
@@ -120,7 +199,7 @@ func handleCommonEnvVars() {
|
||||
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
|
||||
browserFlag, err := ParseBoolFlag(browser)
|
||||
if err != nil {
|
||||
logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER environment variable")
|
||||
logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER value in environment variable")
|
||||
}
|
||||
|
||||
// browser Envs are set globally, this does not represent
|
||||
@@ -139,37 +218,90 @@ func handleCommonEnvVars() {
|
||||
etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
|
||||
if ok {
|
||||
etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")
|
||||
|
||||
var etcdSecure bool
|
||||
for _, endpoint := range etcdEndpoints {
|
||||
u, err := xnet.ParseURL(endpoint)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
|
||||
}
|
||||
// If one of the endpoint is https, we will use https directly.
|
||||
etcdSecure = etcdSecure || u.Scheme == "https"
|
||||
}
|
||||
|
||||
var err error
|
||||
globalEtcdClient, err = etcd.New(etcd.Config{
|
||||
Endpoints: etcdEndpoints,
|
||||
DialTimeout: defaultDialTimeout,
|
||||
DialKeepAliveTime: defaultDialKeepAlive,
|
||||
})
|
||||
if etcdSecure {
|
||||
// This is only to support client side certificate authentication
|
||||
// https://coreos.com/etcd/docs/latest/op-guide/security.html
|
||||
etcdClientCertFile, ok1 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT")
|
||||
etcdClientCertKey, ok2 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT_KEY")
|
||||
var getClientCertificate func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
|
||||
if ok1 && ok2 {
|
||||
getClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||
cert, terr := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
|
||||
return &cert, terr
|
||||
}
|
||||
}
|
||||
|
||||
globalEtcdClient, err = etcd.New(etcd.Config{
|
||||
Endpoints: etcdEndpoints,
|
||||
DialTimeout: defaultDialTimeout,
|
||||
DialKeepAliveTime: defaultDialKeepAlive,
|
||||
TLS: &tls.Config{
|
||||
RootCAs: globalRootCAs,
|
||||
GetClientCertificate: getClientCertificate,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
globalEtcdClient, err = etcd.New(etcd.Config{
|
||||
Endpoints: etcdEndpoints,
|
||||
DialTimeout: defaultDialTimeout,
|
||||
DialKeepAliveTime: defaultDialKeepAlive,
|
||||
})
|
||||
}
|
||||
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
|
||||
}
|
||||
|
||||
globalDomainName, globalIsEnvDomainName = os.LookupEnv("MINIO_DOMAIN")
|
||||
if globalDomainName != "" {
|
||||
if _, ok := dns2.IsDomainName(globalDomainName); !ok {
|
||||
logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", globalDomainName), "Invalid MINIO_DOMAIN environment variable")
|
||||
v, ok := os.LookupEnv("MINIO_DOMAIN")
|
||||
if ok {
|
||||
for _, domainName := range strings.Split(v, ",") {
|
||||
if _, ok = dns2.IsDomainName(domainName); !ok {
|
||||
logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", domainName),
|
||||
"Invalid MINIO_DOMAIN value in environment variable")
|
||||
}
|
||||
globalDomainNames = append(globalDomainNames, domainName)
|
||||
}
|
||||
}
|
||||
|
||||
minioEndpointsEnv, ok := os.LookupEnv("MINIO_PUBLIC_IPS")
|
||||
if ok {
|
||||
minioEndpoints := strings.Split(minioEndpointsEnv, ",")
|
||||
globalDomainIPs = set.NewStringSet()
|
||||
for i, ip := range minioEndpoints {
|
||||
if net.ParseIP(ip) == nil {
|
||||
logger.FatalIf(errInvalidArgument, "Unable to initialize Minio server with invalid MINIO_PUBLIC_IPS[%d]: %s", i, ip)
|
||||
var domainIPs = set.NewStringSet()
|
||||
for _, endpoint := range minioEndpoints {
|
||||
if net.ParseIP(endpoint) == nil {
|
||||
// Checking if the IP is a DNS entry.
|
||||
addrs, err := net.LookupHost(endpoint)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize Minio server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
domainIPs.Add(addr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
globalDomainIPs.Add(ip)
|
||||
domainIPs.Add(endpoint)
|
||||
}
|
||||
updateDomainIPs(domainIPs)
|
||||
} else {
|
||||
// Add found interfaces IP address to global domain IPS,
|
||||
// loopback addresses will be naturally dropped.
|
||||
updateDomainIPs(localIP4)
|
||||
}
|
||||
if globalDomainName != "" && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
|
||||
|
||||
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
|
||||
var err error
|
||||
globalDNSConfig, err = dns.NewCoreDNS(globalDomainName, globalDomainIPs, globalMinioPort, globalEtcdClient)
|
||||
logger.FatalIf(err, "Unable to initialize DNS config for %s.", globalDomainName)
|
||||
globalDNSConfig, err = dns.NewCoreDNS(globalDomainNames, globalDomainIPs, globalMinioPort, globalEtcdClient)
|
||||
logger.FatalIf(err, "Unable to initialize DNS config for %s.", globalDomainNames)
|
||||
}
|
||||
|
||||
if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
|
||||
@@ -246,7 +378,7 @@ func handleCommonEnvVars() {
|
||||
if worm := os.Getenv("MINIO_WORM"); worm != "" {
|
||||
wormFlag, err := ParseBoolFlag(worm)
|
||||
if err != nil {
|
||||
logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Unable to validate MINIO_WORM environment variable")
|
||||
logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Invalid MINIO_WORM value in environment variable")
|
||||
}
|
||||
|
||||
// worm Envs are set globally, this does not represent
|
||||
@@ -255,17 +387,27 @@ func handleCommonEnvVars() {
|
||||
globalWORMEnabled = bool(wormFlag)
|
||||
}
|
||||
|
||||
kmsConf, err := crypto.NewVaultConfig()
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to initialize hashicorp vault")
|
||||
if compress := os.Getenv("MINIO_COMPRESS"); compress != "" {
|
||||
globalIsCompressionEnabled = strings.EqualFold(compress, "true")
|
||||
}
|
||||
if kmsConf.Vault.Endpoint != "" {
|
||||
kms, err := crypto.NewVault(kmsConf)
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to initialize KMS")
|
||||
|
||||
compressExtensions := os.Getenv("MINIO_COMPRESS_EXTENSIONS")
|
||||
compressMimeTypes := os.Getenv("MINIO_COMPRESS_MIMETYPES")
|
||||
if compressExtensions != "" || compressMimeTypes != "" {
|
||||
globalIsEnvCompression = true
|
||||
if compressExtensions != "" {
|
||||
extensions, err := parseCompressIncludes(strings.Split(compressExtensions, compressEnvDelimiter))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Invalid MINIO_COMPRESS_EXTENSIONS value (`%s`)", extensions)
|
||||
}
|
||||
globalCompressExtensions = extensions
|
||||
}
|
||||
if compressMimeTypes != "" {
|
||||
contenttypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, compressEnvDelimiter))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Invalid MINIO_COMPRESS_MIMETYPES value (`%s`)", contenttypes)
|
||||
}
|
||||
globalCompressMimeTypes = contenttypes
|
||||
}
|
||||
globalKMS = kms
|
||||
globalKMSKeyID = kmsConf.Vault.Key.Name
|
||||
globalKMSConfig = kmsConf
|
||||
}
|
||||
}
|
||||
|
||||
151
cmd/config-common.go
Normal file
@@ -0,0 +1,151 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"errors"
	"fmt"

	etcd "github.com/coreos/etcd/clientv3"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
)

var errConfigNotFound = errors.New("config file not found")

func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
	var buffer bytes.Buffer
	// Read entire content by setting size to -1
	if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {
			return nil, errConfigNotFound
		}

		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
		logger.LogIf(ctx, err)
		return nil, err
	}

	// Return config not found on empty content.
	if buffer.Len() == 0 {
		return nil, errConfigNotFound
	}

	return buffer.Bytes(), nil
}

func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
	_, err := client.Delete(ctx, configFile)
	return err
}

func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}

func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	_, err := client.Put(timeoutCtx, configFile, string(data))
	if err == context.DeadlineExceeded {
		return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
	} else if err != nil {
		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	return nil
}

func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
	if err != nil {
		return err
	}

	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
	return err
}

func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	resp, err := client.Get(timeoutCtx, configFile)
	if err != nil {
		if err == context.DeadlineExceeded {
			return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
		}
		return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	if resp.Count == 0 {
		return nil, errConfigNotFound
	}
	for _, ev := range resp.Kvs {
		if string(ev.Key) == configFile {
			return ev.Value, nil
		}
	}
	return nil, errConfigNotFound
}

// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
	defer cancel()
	for watchResp := range globalEtcdClient.Watch(ctx, configFile) {
		for _, event := range watchResp.Events {
			if event.IsModify() || event.IsCreate() {
				loadCfgFn(objAPI)
			}
		}
	}
}

func checkConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	resp, err := client.Get(timeoutCtx, configFile)
	if err != nil {
		if err == context.DeadlineExceeded {
			return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
		}
		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	if resp.Count == 0 {
		return errConfigNotFound
	}
	return nil
}

func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	if globalEtcdClient != nil {
		return checkConfigEtcd(ctx, globalEtcdClient, configFile)
	}

	if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {
			return errConfigNotFound
		}

		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
		logger.LogIf(ctx, err)
		return err
	}
	return nil
}
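As an aside, the timeout-and-not-found conventions used by saveConfigEtcd and readConfigEtcd above can be exercised standalone. The endpoint, key, timeout and error text below are illustrative assumptions for the sketch, not values taken from the repository:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	etcd "github.com/coreos/etcd/clientv3"
)

var errNotFound = errors.New("config file not found")

func putWithTimeout(client *etcd.Client, key string, data []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_, err := client.Put(ctx, key, string(data))
	if err == context.DeadlineExceeded {
		return fmt.Errorf("etcd is unreachable, check endpoints %s", client.Endpoints())
	}
	return err
}

func getWithTimeout(client *etcd.Client, key string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	resp, err := client.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	// Zero keys returned is treated as "not found", mirroring readConfigEtcd.
	if resp.Count == 0 {
		return nil, errNotFound
	}
	return resp.Kvs[0].Value, nil
}

func main() {
	client, err := etcd.New(etcd.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer client.Close()

	if err := putWithTimeout(client, "config/config.json", []byte(`{"version":"33"}`)); err != nil {
		fmt.Println("put:", err)
		return
	}
	data, err := getWithTimeout(client, "config/config.json")
	fmt.Println(string(data), err)
}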
|
||||
@@ -20,15 +20,19 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/iam/validator"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// Steps to move from version N to version N+1
|
||||
@@ -40,9 +44,9 @@ import (
|
||||
// 6. Make changes in config-current_test.go for any test change
|
||||
|
||||
// Config version
|
||||
const serverConfigVersion = "29"
|
||||
const serverConfigVersion = "33"
|
||||
|
||||
type serverConfig = serverConfigV29
|
||||
type serverConfig = serverConfigV33
|
||||
|
||||
var (
|
||||
// globalServerConfig server config.
|
||||
@@ -173,61 +177,79 @@ func (s *serverConfig) Validate() error {
|
||||
// Worm, Cache and StorageClass values are already validated during json unmarshal
|
||||
for _, v := range s.Notify.AMQP {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("amqp: %s", err.Error())
|
||||
return fmt.Errorf("amqp: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.Elasticsearch {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("elasticsearch: %s", err.Error())
|
||||
return fmt.Errorf("elasticsearch: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.Kafka {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("kafka: %s", err.Error())
|
||||
return fmt.Errorf("kafka: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.MQTT {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("mqtt: %s", err.Error())
|
||||
return fmt.Errorf("mqtt: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.MySQL {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("mysql: %s", err.Error())
|
||||
return fmt.Errorf("mysql: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.NATS {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("nats: %s", err.Error())
|
||||
return fmt.Errorf("nats: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.NSQ {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("nsq: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.PostgreSQL {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("postgreSQL: %s", err.Error())
|
||||
return fmt.Errorf("postgreSQL: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.Redis {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("redis: %s", err.Error())
|
||||
return fmt.Errorf("redis: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range s.Notify.Webhook {
|
||||
if err := v.Validate(); err != nil {
|
||||
return fmt.Errorf("webhook: %s", err.Error())
|
||||
return fmt.Errorf("webhook: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetCompressionConfig sets the current compression config
|
||||
func (s *serverConfig) SetCompressionConfig(extensions []string, mimeTypes []string) {
|
||||
s.Compression.Extensions = extensions
|
||||
s.Compression.MimeTypes = mimeTypes
|
||||
s.Compression.Enabled = globalIsCompressionEnabled
|
||||
}
|
||||
|
||||
// GetCompressionConfig gets the current compression config
|
||||
func (s *serverConfig) GetCompressionConfig() compressionConfig {
|
||||
return s.Compression
|
||||
}
|
||||
|
||||
func (s *serverConfig) loadFromEnvs() {
|
||||
// If env is set override the credentials from config file.
|
||||
if globalIsEnvCreds {
|
||||
@@ -250,8 +272,26 @@ func (s *serverConfig) loadFromEnvs() {
|
||||
s.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
|
||||
}
|
||||
|
||||
if globalKMS != nil {
|
||||
s.KMS = globalKMSConfig
|
||||
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
|
||||
logger.FatalIf(err, "Unable to setup the KMS")
|
||||
}
|
||||
|
||||
if globalIsEnvCompression {
|
||||
s.SetCompressionConfig(globalCompressExtensions, globalCompressMimeTypes)
|
||||
}
|
||||
|
||||
if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
|
||||
if u, err := xnet.ParseURL(jwksURL); err == nil {
|
||||
s.OpenID.JWKS.URL = u
|
||||
logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
|
||||
}
|
||||
}
|
||||
|
||||
if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
|
||||
if u, err := xnet.ParseURL(opaURL); err == nil {
|
||||
s.Policy.OPA.URL = u
|
||||
s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -325,6 +365,17 @@ func (s *serverConfig) TestNotificationTargets() error {
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.NSQ {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewNSQTarget(k, v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("nsq(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.PostgreSQL {
|
||||
if !v.Enable {
|
||||
continue
|
||||
@@ -366,10 +417,14 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
|
||||
return "StorageClass configuration differs"
|
||||
case !reflect.DeepEqual(s.Cache, t.Cache):
|
||||
return "Cache configuration differs"
|
||||
case !reflect.DeepEqual(s.Compression, t.Compression):
|
||||
return "Compression configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.AMQP, t.Notify.AMQP):
|
||||
return "AMQP Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS):
|
||||
return "NATS Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.NSQ, t.Notify.NSQ):
|
||||
return "NSQ Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch):
|
||||
return "ElasticSearch Notification configuration differs"
|
||||
case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis):
|
||||
@@ -417,6 +472,11 @@ func newServerConfig() *serverConfig {
|
||||
},
|
||||
KMS: crypto.KMSConfig{},
|
||||
Notify: notifier{},
|
||||
Compression: compressionConfig{
|
||||
Enabled: false,
|
||||
Extensions: globalCompressExtensions,
|
||||
MimeTypes: globalCompressMimeTypes,
|
||||
},
|
||||
}
|
||||
|
||||
// Make sure to initialize notification configs.
|
||||
@@ -430,6 +490,8 @@ func newServerConfig() *serverConfig {
|
||||
srvCfg.Notify.Redis["1"] = target.RedisArgs{}
|
||||
srvCfg.Notify.NATS = make(map[string]target.NATSArgs)
|
||||
srvCfg.Notify.NATS["1"] = target.NATSArgs{}
|
||||
srvCfg.Notify.NSQ = make(map[string]target.NSQArgs)
|
||||
srvCfg.Notify.NSQ["1"] = target.NSQArgs{}
|
||||
srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
|
||||
srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{}
|
||||
srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs)
|
||||
@@ -473,18 +535,32 @@ func (s *serverConfig) loadToCachedConfigs() {
|
||||
globalCacheExpiry = cacheConf.Expiry
|
||||
globalCacheMaxUse = cacheConf.MaxUse
|
||||
}
|
||||
if globalKMS == nil {
|
||||
globalKMSConfig = s.KMS
|
||||
if kms, err := crypto.NewVault(globalKMSConfig); err == nil {
|
||||
globalKMS = kms
|
||||
globalKMSKeyID = globalKMSConfig.Vault.Key.Name
|
||||
}
|
||||
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
|
||||
logger.FatalIf(err, "Unable to setup the KMS")
|
||||
}
|
||||
|
||||
if !globalIsCompressionEnabled {
|
||||
compressionConf := s.GetCompressionConfig()
|
||||
globalCompressExtensions = compressionConf.Extensions
|
||||
globalCompressMimeTypes = compressionConf.MimeTypes
|
||||
globalIsCompressionEnabled = compressionConf.Enabled
|
||||
}
|
||||
|
||||
globalIAMValidators = getAuthValidators(s)
|
||||
|
||||
if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
|
||||
globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
|
||||
URL: s.Policy.OPA.URL,
|
||||
AuthToken: s.Policy.OPA.AuthToken,
|
||||
Transport: NewCustomHTTPTransport(),
|
||||
CloseRespFn: xhttp.DrainBody,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// newConfig - initialize a new server config, saves env parameters if
|
||||
// newSrvConfig - initialize a new server config, saves env parameters if
|
||||
// found, otherwise use default parameters
|
||||
func newConfig(objAPI ObjectLayer) error {
|
||||
func newSrvConfig(objAPI ObjectLayer) error {
|
||||
// Initialize server config.
|
||||
srvCfg := newServerConfig()
|
||||
|
||||
@@ -535,6 +611,20 @@ func loadConfig(objAPI ObjectLayer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getAuthValidators - returns ValidatorList which contains
|
||||
// enabled providers in server config.
|
||||
// A new authentication provider is added like below
|
||||
// * Add a new provider in pkg/iam/validator package.
|
||||
func getAuthValidators(config *serverConfig) *validator.Validators {
|
||||
validators := validator.NewValidators()
|
||||
|
||||
if config.OpenID.JWKS.URL != nil {
|
||||
validators.Add(validator.NewJWT(config.OpenID.JWKS))
|
||||
}
|
||||
|
||||
return validators
|
||||
}
|
||||
|
||||
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
|
||||
// A new notification target is added like below
|
||||
// * Add a new target in pkg/event/target package.
|
||||
@@ -591,6 +681,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
||||
|
||||
for id, args := range config.Notify.MQTT {
|
||||
if args.Enable {
|
||||
args.RootCAs = globalRootCAs
|
||||
newTarget, err := target.NewMQTTTarget(id, args)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
@@ -631,6 +722,20 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.NSQ {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewNSQTarget(id, args)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.PostgreSQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewPostgreSQLTarget(id, args)
|
||||
@@ -661,6 +766,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
||||
|
||||
for id, args := range config.Notify.Webhook {
|
||||
if args.Enable {
|
||||
args.RootCAs = globalRootCAs
|
||||
newTarget := target.NewWebhookTarget(id, args)
|
||||
if err := targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
|
||||
@@ -130,8 +130,8 @@ func TestServerConfigWithEnvs(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check if serverConfig has the correct domain
|
||||
if globalDomainName != "domain.com" {
|
||||
t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainName)
|
||||
if globalDomainNames[0] != "domain.com" {
|
||||
t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainNames[0])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,7 +188,7 @@ func TestValidateConfig(t *testing.T) {
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
|
||||
|
||||
// Test 12 - Test NATS
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
|
||||
|
||||
// Test 13 - Test ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
|
||||
@@ -233,11 +233,14 @@ func TestValidateConfig(t *testing.T) {
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
|
||||
|
||||
// Test 27 - Test MQTT
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": "", "queueLimit": 0}}}}`, false},
|
||||
|
||||
// Test 28 - Test NSQ
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
if err = saveConfig(objLayer, configPath, []byte(testCase.configData)); err != nil {
|
||||
if err = saveConfig(context.Background(), objLayer, configPath, []byte(testCase.configData)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = getValidConfig(objLayer)
|
||||
@@ -260,8 +263,16 @@ func TestConfigDiff(t *testing.T) {
|
||||
{&serverConfig{}, nil, "Given configuration is empty"},
|
||||
// 2
|
||||
{
|
||||
&serverConfig{Credential: auth.Credentials{"u1", "p1"}},
|
||||
&serverConfig{Credential: auth.Credentials{"u1", "p2"}},
|
||||
&serverConfig{Credential: auth.Credentials{
|
||||
AccessKey: "u1",
|
||||
SecretKey: "p1",
|
||||
Expiration: timeSentinel,
|
||||
}},
|
||||
&serverConfig{Credential: auth.Credentials{
|
||||
AccessKey: "u1",
|
||||
SecretKey: "p2",
|
||||
Expiration: timeSentinel,
|
||||
}},
|
||||
"Credential configuration differs",
|
||||
},
|
||||
// 3
|
||||
@@ -285,48 +296,54 @@ func TestConfigDiff(t *testing.T) {
|
||||
"NATS Notification configuration differs",
|
||||
},
|
||||
// 7
|
||||
{
|
||||
&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: false}}}},
|
||||
"NSQ Notification configuration differs",
|
||||
},
|
||||
// 8
|
||||
{
|
||||
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}},
|
||||
"ElasticSearch Notification configuration differs",
|
||||
},
|
||||
// 8
|
||||
// 9
|
||||
{
|
||||
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}},
|
||||
"Redis Notification configuration differs",
|
||||
},
|
||||
// 9
|
||||
// 10
|
||||
{
|
||||
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}},
|
||||
"PostgreSQL Notification configuration differs",
|
||||
},
|
||||
// 10
|
||||
// 11
|
||||
{
|
||||
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}},
|
||||
"Kafka Notification configuration differs",
|
||||
},
|
||||
// 11
|
||||
// 12
|
||||
{
|
||||
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}},
|
||||
"Webhook Notification configuration differs",
|
||||
},
|
||||
// 12
|
||||
// 13
|
||||
{
|
||||
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}},
|
||||
"MySQL Notification configuration differs",
|
||||
},
|
||||
// 13
|
||||
// 14
|
||||
{
|
||||
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}},
|
||||
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}},
|
||||
"MQTT Notification configuration differs",
|
||||
},
|
||||
// 14
|
||||
// 15
|
||||
{
|
||||
&serverConfig{Logger: loggerConfig{
|
||||
Console: loggerConsole{Enabled: true},
|
||||
|
||||
@@ -19,7 +19,6 @@ package cmd
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
)
|
||||
@@ -41,55 +40,9 @@ const (
|
||||
privateKeyFile = "private.key"
|
||||
)
|
||||
|
||||
// ConfigDir - configuration directory with locking.
|
||||
// ConfigDir - points to a user set directory.
|
||||
type ConfigDir struct {
|
||||
sync.Mutex
|
||||
dir string
|
||||
}
|
||||
|
||||
// Set - saves given directory as configuration directory.
|
||||
func (config *ConfigDir) Set(dir string) {
|
||||
config.Lock()
|
||||
defer config.Unlock()
|
||||
|
||||
config.dir = dir
|
||||
}
|
||||
|
||||
// Get - returns current configuration directory.
|
||||
func (config *ConfigDir) Get() string {
|
||||
config.Lock()
|
||||
defer config.Unlock()
|
||||
|
||||
return config.dir
|
||||
}
|
||||
|
||||
func (config *ConfigDir) getCertsDir() string {
|
||||
return filepath.Join(config.Get(), certsDir)
|
||||
}
|
||||
|
||||
// GetCADir - returns certificate CA directory.
|
||||
func (config *ConfigDir) GetCADir() string {
|
||||
return filepath.Join(config.getCertsDir(), certsCADir)
|
||||
}
|
||||
|
||||
// Create - creates configuration directory tree.
|
||||
func (config *ConfigDir) Create() error {
|
||||
return os.MkdirAll(config.GetCADir(), 0700)
|
||||
}
|
||||
|
||||
// GetMinioConfigFile - returns absolute path of config.json file.
|
||||
func (config *ConfigDir) GetMinioConfigFile() string {
|
||||
return filepath.Join(config.Get(), minioConfigFile)
|
||||
}
|
||||
|
||||
// GetPublicCertFile - returns absolute path of public.crt file.
|
||||
func (config *ConfigDir) GetPublicCertFile() string {
|
||||
return filepath.Join(config.getCertsDir(), publicCertFile)
|
||||
}
|
||||
|
||||
// GetPrivateKeyFile - returns absolute path of private.key file.
|
||||
func (config *ConfigDir) GetPrivateKeyFile() string {
|
||||
return filepath.Join(config.getCertsDir(), privateKeyFile)
|
||||
path string
|
||||
}
|
||||
|
||||
func getDefaultConfigDir() string {
|
||||
@@ -101,32 +54,54 @@ func getDefaultConfigDir() string {
|
||||
return filepath.Join(homeDir, defaultMinioConfigDir)
|
||||
}
|
||||
|
||||
var configDir = &ConfigDir{dir: getDefaultConfigDir()}
|
||||
|
||||
func setConfigDir(dir string) {
|
||||
configDir.Set(dir)
|
||||
func getDefaultCertsDir() string {
|
||||
return filepath.Join(getDefaultConfigDir(), certsDir)
|
||||
}
|
||||
|
||||
func getConfigDir() string {
|
||||
return configDir.Get()
|
||||
func getDefaultCertsCADir() string {
|
||||
return filepath.Join(getDefaultCertsDir(), certsCADir)
|
||||
}
|
||||
|
||||
func getCADir() string {
|
||||
return configDir.GetCADir()
|
||||
var (
|
||||
// Default config, certs and CA directories.
|
||||
defaultConfigDir = &ConfigDir{path: getDefaultConfigDir()}
|
||||
defaultCertsDir = &ConfigDir{path: getDefaultCertsDir()}
|
||||
defaultCertsCADir = &ConfigDir{path: getDefaultCertsCADir()}
|
||||
|
||||
// Points to current configuration directory -- deprecated, to be removed in future.
|
||||
globalConfigDir = defaultConfigDir
|
||||
// Points to current certs directory set by user with --certs-dir
|
||||
globalCertsDir = defaultCertsDir
|
||||
// Points to relative path to certs directory and is <value-of-certs-dir>/CAs
|
||||
globalCertsCADir = defaultCertsCADir
|
||||
)
|
||||
|
||||
// Get - returns current directory.
|
||||
func (dir *ConfigDir) Get() string {
|
||||
return dir.path
|
||||
}
|
||||
|
||||
func createConfigDir() error {
|
||||
return configDir.Create()
|
||||
// Attempts to create all directories, ignores any permission denied errors.
|
||||
func mkdirAllIgnorePerm(path string) error {
|
||||
err := os.MkdirAll(path, 0700)
|
||||
if err != nil {
|
||||
// It is possible in kubernetes like deployments this directory
|
||||
// is already mounted and is not writable, ignore any write errors.
|
||||
if os.IsPermission(err) {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func getConfigFile() string {
|
||||
return configDir.GetMinioConfigFile()
|
||||
return filepath.Join(globalConfigDir.Get(), minioConfigFile)
|
||||
}
|
||||
|
||||
func getPublicCertFile() string {
|
||||
return configDir.GetPublicCertFile()
|
||||
return filepath.Join(globalCertsDir.Get(), publicCertFile)
|
||||
}
|
||||
|
||||
func getPrivateKeyFile() string {
|
||||
return configDir.GetPrivateKeyFile()
|
||||
return filepath.Join(globalCertsDir.Get(), privateKeyFile)
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
@@ -26,9 +27,10 @@ import (
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/dns"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/iam/validator"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
)
|
||||
@@ -228,11 +230,11 @@ func migrateConfig() error {
|
||||
|
||||
// Version '1' is not supported anymore and deprecated, safe to delete.
|
||||
func purgeV1() error {
|
||||
configFile := filepath.Join(getConfigDir(), "fsUsers.json")
|
||||
configFile := filepath.Join(globalConfigDir.Get(), "fsUsers.json")
|
||||
|
||||
cv1 := &configV1{}
|
||||
_, err := Load(configFile, cv1)
|
||||
if os.IsNotExist(err) || err == dns.ErrNoEntriesFound {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config version ‘1’. %v", err)
|
||||
@@ -916,7 +918,7 @@ func migrateV12ToV13() error {
|
||||
// Copy over fields from V12 into V13 config struct
|
||||
srvConfig := &serverConfigV13{
|
||||
Logger: &loggerV7{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "13"
|
||||
srvConfig.Credential = cv12.Credential
|
||||
@@ -996,7 +998,7 @@ func migrateV13ToV14() error {
|
||||
// Copy over fields from V13 into V14 config struct
|
||||
srvConfig := &serverConfigV14{
|
||||
Logger: &loggerV7{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "14"
|
||||
srvConfig.Credential = cv13.Credential
|
||||
@@ -1081,7 +1083,7 @@ func migrateV14ToV15() error {
|
||||
// Copy over fields from V14 into V15 config struct
|
||||
srvConfig := &serverConfigV15{
|
||||
Logger: &loggerV7{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "15"
|
||||
srvConfig.Credential = cv14.Credential
|
||||
@@ -1171,7 +1173,7 @@ func migrateV15ToV16() error {
|
||||
// Copy over fields from V15 into V16 config struct
|
||||
srvConfig := &serverConfigV16{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "16"
|
||||
srvConfig.Credential = cv15.Credential
|
||||
@@ -1261,7 +1263,7 @@ func migrateV16ToV17() error {
|
||||
// Copy over fields from V16 into V17 config struct
|
||||
srvConfig := &serverConfigV17{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "17"
|
||||
srvConfig.Credential = cv16.Credential
|
||||
@@ -1382,7 +1384,7 @@ func migrateV17ToV18() error {
|
||||
// Copy over fields from V17 into V18 config struct
|
||||
srvConfig := &serverConfigV17{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "18"
|
||||
srvConfig.Credential = cv17.Credential
|
||||
@@ -1484,7 +1486,7 @@ func migrateV18ToV19() error {
|
||||
// Copy over fields from V18 into V19 config struct
|
||||
srvConfig := &serverConfigV18{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "19"
|
||||
srvConfig.Credential = cv18.Credential
|
||||
@@ -1590,7 +1592,7 @@ func migrateV19ToV20() error {
|
||||
// Copy over fields from V19 into V20 config struct
|
||||
srvConfig := &serverConfigV20{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "20"
|
||||
srvConfig.Credential = cv19.Credential
|
||||
@@ -1694,7 +1696,7 @@ func migrateV20ToV21() error {
|
||||
|
||||
// Copy over fields from V20 into V21 config struct
|
||||
srvConfig := &serverConfigV21{
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "21"
|
||||
srvConfig.Credential = cv20.Credential
|
||||
@@ -1798,7 +1800,7 @@ func migrateV21ToV22() error {
|
||||
|
||||
// Copy over fields from V21 into V22 config struct
|
||||
srvConfig := &serverConfigV22{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "22"
|
||||
srvConfig.Credential = cv21.Credential
|
||||
@@ -1902,7 +1904,7 @@ func migrateV22ToV23() error {
|
||||
|
||||
// Copy over fields from V22 into V23 config struct
|
||||
srvConfig := &serverConfigV23{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "23"
|
||||
srvConfig.Credential = cv22.Credential
|
||||
@@ -2015,7 +2017,7 @@ func migrateV23ToV24() error {
|
||||
|
||||
// Copy over fields from V23 into V24 config struct
|
||||
srvConfig := &serverConfigV24{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "24"
|
||||
srvConfig.Credential = cv23.Credential
|
||||
@@ -2128,7 +2130,7 @@ func migrateV24ToV25() error {
|
||||
|
||||
// Copy over fields from V24 into V25 config struct
|
||||
srvConfig := &serverConfigV25{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "25"
|
||||
srvConfig.Credential = cv24.Credential
|
||||
@@ -2246,7 +2248,7 @@ func migrateV25ToV26() error {
|
||||
|
||||
// Copy over fields from V25 into V26 config struct
|
||||
srvConfig := &serverConfigV26{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "26"
|
||||
srvConfig.Credential = cv25.Credential
|
||||
@@ -2387,6 +2389,7 @@ func migrateV27ToV28() error {
|
||||
|
||||
// config V28 is backward compatible with V27, load the old
|
||||
// config file in serverConfigV28 struct and initialize KMSConfig
|
||||
|
||||
srvConfig := &serverConfigV28{}
|
||||
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
|
||||
if os.IsNotExist(err) {
|
||||
@@ -2409,28 +2412,176 @@ func migrateV27ToV28() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Migrates '.minio.sys/config.json' v27 to v28.
|
||||
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
|
||||
func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
|
||||
// Construct path to config.json for the given bucket.
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
// Verify if config was already available in .minio.sys in which case, nothing more to be done.
|
||||
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Rename config.json to config.json.deprecated only upon
|
||||
// success of this function.
|
||||
if err == nil {
|
||||
os.Rename(getConfigFile(), getConfigFile()+".deprecated")
|
||||
}
|
||||
}()
|
||||
|
||||
transactionConfigFile := configFile + ".transaction"
|
||||
|
||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
||||
// and saveConfig().
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
|
||||
if err = objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
// Verify if backend already has the file (after holding lock)
|
||||
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
|
||||
return err
|
||||
} // if errConfigNotFound proceed to migrate..
|
||||
|
||||
var config = &serverConfig{}
|
||||
if _, err = Load(getConfigFile(), config); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
// Read from deprecate file as well if necessary.
|
||||
if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
// If all else fails simply initialize the server config.
|
||||
return newSrvConfig(objAPI)
|
||||
}
|
||||
|
||||
}
|
||||
return saveServerConfig(context.Background(), objAPI, config)
|
||||
}
|
||||
|
||||
// Migrates '.minio.sys/config.json' to v33.
|
||||
func migrateMinioSysConfig(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
// Check if the config version is latest, if not migrate.
|
||||
ok, _, err := checkConfigVersion(objAPI, configFile, serverConfigVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct path to config.json for the given bucket.
|
||||
transactionConfigFile := configFile + ".transaction"
|
||||
|
||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
||||
// and saveConfig().
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
|
||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
if err := migrateV27ToV28MinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
return migrateV28ToV29MinioSys(objAPI)
|
||||
if err := migrateV28ToV29MinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := migrateV29ToV30MinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := migrateV30ToV31MinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := migrateV31ToV32MinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
return migrateV32ToV33MinioSys(objAPI)
|
||||
}
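The chained migration above follows a run-steps-in-order, stop-on-first-error pattern; a hedged standalone sketch of that shape (the step names are illustrative placeholders, not the repository's functions):

package main

import "fmt"

// migrate runs each step in order and stops at the first failure.
func migrate(steps []func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	steps := []func() error{
		func() error { fmt.Println("27 -> 28"); return nil },
		func() error { fmt.Println("28 -> 29"); return nil },
		func() error { fmt.Println("32 -> 33"); return nil },
	}
	if err := migrate(steps); err != nil {
		fmt.Println("migration failed:", err)
	}
}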
|
||||
|
||||
func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
|
||||
func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
|
||||
data, err := readConfig(context.Background(), objAPI, configFile)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
var versionConfig struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
vcfg := &versionConfig
|
||||
if err = json.Unmarshal(data, vcfg); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
return vcfg.Version == version, data, nil
|
||||
}
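checkConfigVersion relies on decoding only the top-level "version" field before deciding whether a migration step applies; a minimal self-contained sketch of that pattern (the sample JSON is made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// configVersion peeks at the "version" field without decoding the full config.
func configVersion(data []byte) (string, error) {
	var v struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(data, &v); err != nil {
		return "", err
	}
	return v.Version, nil
}

func main() {
	data := []byte(`{"version":"32","credential":{"accessKey":"minio"}}`)
	ver, err := configVersion(data)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// Only run the 32 -> 33 step when the stored version matches.
	if ver == "32" {
		fmt.Println("config is at version 32, migration to 33 applies")
	}
}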
|
||||
|
||||
func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
srvConfig, err := readServerConfig(context.Background(), objAPI)
|
||||
ok, data, err := checkConfigVersion(objAPI, configFile, "27")
|
||||
if err == errConfigNotFound {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config file. %v", err)
|
||||
}
|
||||
if srvConfig.Version != "28" {
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
srvConfig.Version = "29"
|
||||
if err = saveServerConfig(context.Background(), objAPI, srvConfig); err != nil {
|
||||
cfg := &serverConfigV28{}
|
||||
if err = json.Unmarshal(data, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg.Version = "28"
|
||||
cfg.KMS = crypto.KMSConfig{}
|
||||
|
||||
data, err = json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
|
||||
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
ok, data, err := checkConfigVersion(objAPI, configFile, "28")
|
||||
if err == errConfigNotFound {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config file. %v", err)
|
||||
}
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := &serverConfigV29{}
|
||||
if err = json.Unmarshal(data, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg.Version = "29"
|
||||
data, err = json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘28’ to ‘29’. %v", err)
|
||||
}
|
||||
|
||||
@@ -2438,24 +2589,147 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
|
||||
func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
srvConfig, err := readServerConfig(context.Background(), objAPI)
|
||||
|
||||
ok, data, err := checkConfigVersion(objAPI, configFile, "29")
|
||||
if err == errConfigNotFound {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config file. %v", err)
|
||||
}
|
||||
if srvConfig.Version != "27" {
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
srvConfig.Version = "28"
|
||||
srvConfig.KMS = crypto.KMSConfig{}
|
||||
if err = saveServerConfig(context.Background(), objAPI, srvConfig); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
|
||||
cfg := &serverConfigV30{}
|
||||
if err = json.Unmarshal(data, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
|
||||
cfg.Version = "30"
|
||||
// Init compression config.For future migration, Compression config needs to be copied over from previous version.
|
||||
cfg.Compression.Enabled = false
|
||||
cfg.Compression.Extensions = globalCompressExtensions
|
||||
cfg.Compression.MimeTypes = globalCompressMimeTypes
|
||||
|
||||
data, err = json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘29’ to ‘30’. %v", err)
|
||||
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "29", "30")
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
ok, data, err := checkConfigVersion(objAPI, configFile, "30")
|
||||
if err == errConfigNotFound {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config file. %v", err)
|
||||
}
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := &serverConfigV31{}
|
||||
if err = json.Unmarshal(data, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg.Version = "31"
|
||||
cfg.OpenID.JWKS = validator.JWKSArgs{
|
||||
URL: &xnet.URL{},
|
||||
}
|
||||
cfg.Policy.OPA = iampolicy.OpaArgs{
|
||||
URL: &xnet.URL{},
|
||||
AuthToken: "",
|
||||
}
|
||||
|
||||
data, err = json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘30’ to ‘31’. %v", err)
|
||||
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "30", "31")
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
ok, data, err := checkConfigVersion(objAPI, configFile, "31")
|
||||
if err == errConfigNotFound {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config file. %v", err)
|
||||
}
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := &serverConfigV32{}
|
||||
if err = json.Unmarshal(data, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg.Version = "32"
|
||||
cfg.Notify.NSQ = make(map[string]target.NSQArgs)
|
||||
cfg.Notify.NSQ["1"] = target.NSQArgs{}
|
||||
|
||||
data, err = json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘31’ to ‘32’. %v", err)
|
||||
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "31", "32")
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "32")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV33{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "33"

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘32’ to ‘33’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
	return nil
}
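
All of the migrate...MinioSys steps above share the same shape: check the stored version, unmarshal into the next schema, bump the version string, initialize any new fields, and save. Purely as an illustration of that pattern (version "34" and serverConfigV34 are hypothetical and not part of this change), a future step would look roughly like this:

// Hypothetical sketch only; mirrors the migrations above. serverConfigV34
// stands in for whatever the next config schema would be.
func migrateV33ToV34MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "33")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV34{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "34"
	// ...initialize fields introduced by the new version here...

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘33’ to ‘34’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "33", "34")
	return nil
}
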
@@ -39,7 +39,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
setConfigDir(rootPath)
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
|
||||
globalObjLayerMutex.Lock()
|
||||
globalObjectAPI = objLayer
|
||||
@@ -77,7 +77,7 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
setConfigDir(rootPath)
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
|
||||
if err := migrateV2ToV3(); err != nil {
|
||||
t.Fatal("migrate v2 to v3 should succeed when no config file is found")
|
||||
@@ -159,14 +159,15 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Test if a config migration from v2 to v29 is successfully done
|
||||
func TestServerConfigMigrateV2toV29(t *testing.T) {
|
||||
// Test if a config migration from v2 to v33 is successfully done
|
||||
func TestServerConfigMigrateV2toV33(t *testing.T) {
|
||||
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
setConfigDir(rootPath)
|
||||
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
|
||||
objLayer, fsDir, err := prepareFS()
|
||||
if err != nil {
|
||||
@@ -222,6 +223,7 @@ func TestServerConfigMigrateV2toV29(t *testing.T) {
|
||||
if globalServerConfig.Credential.AccessKey != accessKey {
|
||||
t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, globalServerConfig.Credential.AccessKey)
|
||||
}
|
||||
|
||||
if globalServerConfig.Credential.SecretKey != secretKey {
|
||||
t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, globalServerConfig.Credential.SecretKey)
|
||||
}
|
||||
@@ -234,7 +236,8 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
setConfigDir(rootPath)
|
||||
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
configPath := rootPath + "/" + minioConfigFile
|
||||
|
||||
// Create a corrupted config file
|
||||
@@ -318,7 +321,6 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
|
||||
if err := migrateV26ToV27(); err == nil {
|
||||
t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
|
||||
}
|
||||
|
||||
if err := migrateV27ToV28(); err == nil {
|
||||
t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
|
||||
}
|
||||
@@ -331,7 +333,8 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
setConfigDir(rootPath)
|
||||
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
configPath := rootPath + "/" + minioConfigFile
|
||||
|
||||
for i := 3; i <= 17; i++ {
|
||||
|
||||
@@ -22,6 +22,8 @@ import (
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/iam/validator"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
)
|
||||
|
||||
@@ -261,9 +263,6 @@ type serverConfigV7 struct {
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifierV1 `json:"notify"`
|
||||
|
||||
// Read Write mutex.
|
||||
rwMutex *sync.RWMutex
|
||||
}
|
||||
|
||||
// serverConfigV8 server configuration version '8'. Adds NATS notifier
|
||||
@@ -280,9 +279,6 @@ type serverConfigV8 struct {
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifierV1 `json:"notify"`
|
||||
|
||||
// Read Write mutex.
|
||||
rwMutex *sync.RWMutex
|
||||
}
|
||||
|
||||
// serverConfigV9 server configuration version '9'. Adds PostgreSQL
|
||||
@@ -299,9 +295,6 @@ type serverConfigV9 struct {
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifierV1 `json:"notify"`
|
||||
|
||||
// Read Write mutex.
|
||||
rwMutex *sync.RWMutex
|
||||
}
|
||||
|
||||
type loggerV7 struct {
|
||||
@@ -371,7 +364,7 @@ type serverConfigV12 struct {
|
||||
Notify notifierV2 `json:"notify"`
|
||||
}
|
||||
|
||||
type notifier struct {
|
||||
type notifierV3 struct {
|
||||
AMQP map[string]target.AMQPArgs `json:"amqp"`
|
||||
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
|
||||
Kafka map[string]target.KafkaArgs `json:"kafka"`
|
||||
@@ -396,7 +389,7 @@ type serverConfigV13 struct {
|
||||
Logger *loggerV7 `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV14 server configuration version '14' which is like
|
||||
@@ -413,7 +406,7 @@ type serverConfigV14 struct {
|
||||
Logger *loggerV7 `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV15 server configuration version '15' which is like
|
||||
@@ -430,7 +423,7 @@ type serverConfigV15 struct {
|
||||
Logger *loggerV7 `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// FileLogger is introduced to workaround the dependency about logrus
|
||||
@@ -468,7 +461,7 @@ type serverConfigV16 struct {
|
||||
Logger *loggers `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV17 server configuration version '17' which is like
|
||||
@@ -487,7 +480,7 @@ type serverConfigV17 struct {
|
||||
Logger *loggers `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV18 server configuration version '18' which is like
|
||||
@@ -506,7 +499,7 @@ type serverConfigV18 struct {
|
||||
Logger *loggers `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV19 server configuration version '19' which is like
|
||||
@@ -524,7 +517,7 @@ type serverConfigV19 struct {
|
||||
Logger *loggers `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV20 server configuration version '20' which is like
|
||||
@@ -543,7 +536,7 @@ type serverConfigV20 struct {
|
||||
Logger *loggers `json:"logger"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV21 is just like version '20' without logger field
|
||||
@@ -558,7 +551,7 @@ type serverConfigV21 struct {
|
||||
Domain string `json:"domain"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify *notifier `json:"notify"`
|
||||
Notify *notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV22 is just like version '21' with added support
|
||||
@@ -579,7 +572,7 @@ type serverConfigV22 struct {
|
||||
StorageClass storageClassConfig `json:"storageclass"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV23 is just like version '22' with addition of cache field.
|
||||
@@ -602,7 +595,7 @@ type serverConfigV23 struct {
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV24 is just like version '23', we had to revert
|
||||
@@ -626,7 +619,7 @@ type serverConfigV24 struct {
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV25 is just like version '24', stores additionally
|
||||
@@ -653,7 +646,7 @@ type serverConfigV25 struct {
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
// serverConfigV26 is just like version '25', stores additionally
|
||||
@@ -677,7 +670,7 @@ type serverConfigV26 struct {
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
}
|
||||
|
||||
type loggerConsole struct {
|
||||
@@ -718,7 +711,7 @@ type serverConfigV27 struct {
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
|
||||
// Logger configuration
|
||||
Logger loggerConfig `json:"logger"`
|
||||
@@ -749,14 +742,150 @@ type serverConfigV28 struct {
|
||||
KMS crypto.KMSConfig `json:"kms"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
Notify notifierV3 `json:"notify"`
|
||||
|
||||
// Logger configuration
|
||||
Logger loggerConfig `json:"logger"`
|
||||
}
|
||||
|
||||
// serverConfigV29 is just like version '28', browser and domain are deprecated.
|
||||
type serverConfigV29 struct {
|
||||
// serverConfigV29 is just like version '28'.
|
||||
type serverConfigV29 serverConfigV28
|
||||
|
||||
// compressionConfig represents the compression settings.
|
||||
type compressionConfig struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
Extensions []string `json:"extensions"`
|
||||
MimeTypes []string `json:"mime-types"`
|
||||
}
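
As a quick illustration of the on-disk shape this struct produces, marshaling it yields the new compress section of config.json; the extension and MIME-type values below are placeholders, not the server defaults carried in globalCompressExtensions and globalCompressMimeTypes.

// Illustrative only: shows the JSON layout produced by compressionConfig.
cfg := compressionConfig{
	Enabled:    true,
	Extensions: []string{".txt", ".log"},
	MimeTypes:  []string{"text/plain"},
}
out, _ := json.Marshal(cfg)
// string(out) == `{"enabled":true,"extensions":[".txt",".log"],"mime-types":["text/plain"]}`
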
|
||||
|
||||
// serverConfigV30 is just like version '29', stores additionally
|
||||
// extensions and mimetypes fields for compression.
|
||||
type serverConfigV30 struct {
|
||||
Version string `json:"version"`
|
||||
|
||||
// S3 API configuration.
|
||||
Credential auth.Credentials `json:"credential"`
|
||||
Region string `json:"region"`
|
||||
Worm BoolFlag `json:"worm"`
|
||||
|
||||
// Storage class configuration
|
||||
StorageClass storageClassConfig `json:"storageclass"`
|
||||
|
||||
// Cache configuration
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// KMS configuration
|
||||
KMS crypto.KMSConfig `json:"kms"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifierV3 `json:"notify"`
|
||||
|
||||
// Logger configuration
|
||||
Logger loggerConfig `json:"logger"`
|
||||
|
||||
// Compression configuration
|
||||
Compression compressionConfig `json:"compress"`
|
||||
}
|
||||
|
||||
// serverConfigV31 is just like version '30', with OPA and OpenID configuration.
|
||||
type serverConfigV31 struct {
|
||||
Version string `json:"version"`
|
||||
|
||||
// S3 API configuration.
|
||||
Credential auth.Credentials `json:"credential"`
|
||||
Region string `json:"region"`
|
||||
Worm BoolFlag `json:"worm"`
|
||||
|
||||
// Storage class configuration
|
||||
StorageClass storageClassConfig `json:"storageclass"`
|
||||
|
||||
// Cache configuration
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// KMS configuration
|
||||
KMS crypto.KMSConfig `json:"kms"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifierV3 `json:"notify"`
|
||||
|
||||
// Logger configuration
|
||||
Logger loggerConfig `json:"logger"`
|
||||
|
||||
// Compression configuration
|
||||
Compression compressionConfig `json:"compress"`
|
||||
|
||||
// OpenID configuration
|
||||
OpenID struct {
|
||||
// JWKS validator config.
|
||||
JWKS validator.JWKSArgs `json:"jwks"`
|
||||
} `json:"openid"`
|
||||
|
||||
// External policy enforcements.
|
||||
Policy struct {
|
||||
// OPA configuration.
|
||||
OPA iampolicy.OpaArgs `json:"opa"`
|
||||
|
||||
// Add new external policy enforcements here.
|
||||
} `json:"policy"`
|
||||
}
|
||||
|
||||
type notifier struct {
|
||||
AMQP map[string]target.AMQPArgs `json:"amqp"`
|
||||
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
|
||||
Kafka map[string]target.KafkaArgs `json:"kafka"`
|
||||
MQTT map[string]target.MQTTArgs `json:"mqtt"`
|
||||
MySQL map[string]target.MySQLArgs `json:"mysql"`
|
||||
NATS map[string]target.NATSArgs `json:"nats"`
|
||||
NSQ map[string]target.NSQArgs `json:"nsq"`
|
||||
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
|
||||
Redis map[string]target.RedisArgs `json:"redis"`
|
||||
Webhook map[string]target.WebhookArgs `json:"webhook"`
|
||||
}
|
||||
|
||||
// serverConfigV32 is just like version '31' with an added NSQ notifier.
|
||||
type serverConfigV32 struct {
|
||||
Version string `json:"version"`
|
||||
|
||||
// S3 API configuration.
|
||||
Credential auth.Credentials `json:"credential"`
|
||||
Region string `json:"region"`
|
||||
Worm BoolFlag `json:"worm"`
|
||||
|
||||
// Storage class configuration
|
||||
StorageClass storageClassConfig `json:"storageclass"`
|
||||
|
||||
// Cache configuration
|
||||
Cache CacheConfig `json:"cache"`
|
||||
|
||||
// KMS configuration
|
||||
KMS crypto.KMSConfig `json:"kms"`
|
||||
|
||||
// Notification queue configuration.
|
||||
Notify notifier `json:"notify"`
|
||||
|
||||
// Logger configuration
|
||||
Logger loggerConfig `json:"logger"`
|
||||
|
||||
// Compression configuration
|
||||
Compression compressionConfig `json:"compress"`
|
||||
|
||||
// OpenID configuration
|
||||
OpenID struct {
|
||||
// JWKS validator config.
|
||||
JWKS validator.JWKSArgs `json:"jwks"`
|
||||
} `json:"openid"`
|
||||
|
||||
// External policy enforcements.
|
||||
Policy struct {
|
||||
// OPA configuration.
|
||||
OPA iampolicy.OpaArgs `json:"opa"`
|
||||
|
||||
// Add new external policy enforcements here.
|
||||
} `json:"policy"`
|
||||
}
|
||||
|
||||
// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit with MQTT.
|
||||
type serverConfigV33 struct {
|
||||
quick.Config `json:"-"` // ignore interfaces
|
||||
|
||||
Version string `json:"version"`
|
||||
@@ -780,4 +909,21 @@ type serverConfigV29 struct {
|
||||
|
||||
// Logger configuration
|
||||
Logger loggerConfig `json:"logger"`
|
||||
|
||||
// Compression configuration
|
||||
Compression compressionConfig `json:"compress"`
|
||||
|
||||
// OpenID configuration
|
||||
OpenID struct {
|
||||
// JWKS validator config.
|
||||
JWKS validator.JWKSArgs `json:"jwks"`
|
||||
} `json:"openid"`
|
||||
|
||||
// External policy enforcements.
|
||||
Policy struct {
|
||||
// OPA configuration.
|
||||
OPA iampolicy.OpaArgs `json:"opa"`
|
||||
|
||||
// Add new external policy enforcements here.
|
||||
} `json:"policy"`
|
||||
}
|
||||
|
||||
cmd/config.go
@@ -20,16 +20,11 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"time"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
)
|
||||
|
||||
@@ -55,23 +50,14 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
|
||||
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
if globalEtcdClient != nil {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
_, err = globalEtcdClient.Put(timeoutCtx, configFile, string(data))
|
||||
defer cancel()
|
||||
return err
|
||||
return saveConfigEtcd(ctx, globalEtcdClient, configFile, data)
|
||||
}
|
||||
|
||||
// Create a backup of the current config
|
||||
reader, err := readConfig(ctx, objAPI, configFile)
|
||||
oldData, err := readConfig(ctx, objAPI, configFile)
|
||||
if err == nil {
|
||||
var oldData []byte
|
||||
oldData, err = ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
|
||||
err = saveConfig(objAPI, backupConfigFile, oldData)
|
||||
if err != nil {
|
||||
if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@@ -81,40 +67,18 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
|
||||
}
|
||||
|
||||
// Save the new config in the std config path
|
||||
return saveConfig(objAPI, configFile, data)
|
||||
}
|
||||
|
||||
func readConfigEtcd(configFile string) ([]byte, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
resp, err := globalEtcdClient.Get(ctx, configFile)
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Count == 0 {
|
||||
return nil, errConfigNotFound
|
||||
}
|
||||
for _, ev := range resp.Kvs {
|
||||
if string(ev.Key) == configFile {
|
||||
return ev.Value, nil
|
||||
}
|
||||
}
|
||||
return nil, errConfigNotFound
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
||||
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
|
||||
var configData []byte
|
||||
var err error
|
||||
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
if globalEtcdClient != nil {
|
||||
configData, err = readConfigEtcd(configFile)
|
||||
configData, err = readConfigEtcd(ctx, globalEtcdClient, configFile)
|
||||
} else {
|
||||
var reader io.Reader
|
||||
reader, err = readConfig(ctx, objAPI, configFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configData, err = ioutil.ReadAll(reader)
|
||||
configData, err = readConfig(ctx, objAPI, configFile)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -129,82 +93,17 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, e
|
||||
}
|
||||
|
||||
var config = &serverConfig{}
|
||||
if err := json.Unmarshal(configData, config); err != nil {
|
||||
if err = json.Unmarshal(configData, config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := quick.CheckData(config); err != nil {
|
||||
if err = quick.CheckData(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func checkServerConfigEtcd(configFile string) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
resp, err := globalEtcdClient.Get(ctx, configFile)
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.Count == 0 {
|
||||
return errConfigNotFound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkServerConfig(ctx context.Context, objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
if globalEtcdClient != nil {
|
||||
return checkServerConfigEtcd(configFile)
|
||||
}
|
||||
|
||||
if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
|
||||
// Convert ObjectNotFound to errConfigNotFound
|
||||
if isErrObjectNotFound(err) {
|
||||
return errConfigNotFound
|
||||
}
|
||||
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
|
||||
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = objAPI.PutObject(context.Background(), minioMetaBucket, configFile, hashReader, nil, ObjectOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
var errConfigNotFound = errors.New("config file not found")
|
||||
|
||||
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
|
||||
var buffer bytes.Buffer
|
||||
// Read entire content by setting size to -1
|
||||
if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
|
||||
// Convert ObjectNotFound and IncompleteBody errors into errConfigNotFound
|
||||
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
|
||||
return nil, errConfigNotFound
|
||||
}
|
||||
|
||||
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return config not found on empty content.
|
||||
if buffer.Len() == 0 {
|
||||
return nil, errConfigNotFound
|
||||
}
|
||||
|
||||
return &buffer, nil
|
||||
}
|
||||
|
||||
// ConfigSys - config system.
|
||||
type ConfigSys struct{}
|
||||
|
||||
@@ -228,22 +127,18 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
|
||||
// of the object layer.
|
||||
// - Write quorum not met when upgrading configuration
|
||||
// version is needed.
|
||||
retryTimerCh := newRetryTimerSimple(doneCh)
|
||||
for {
|
||||
select {
|
||||
case _ = <-retryTimerCh:
|
||||
err := initConfig(objAPI)
|
||||
if err != nil {
|
||||
if isInsufficientReadQuorum(err) || isInsufficientWriteQuorum(err) {
|
||||
logger.Info("Waiting for configuration to be initialized..")
|
||||
continue
|
||||
}
|
||||
return err
|
||||
for range newRetryTimerSimple(doneCh) {
|
||||
if err := initConfig(objAPI); err != nil {
|
||||
if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
|
||||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
|
||||
logger.Info("Waiting for configuration to be initialized..")
|
||||
continue
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
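
The rewritten Init loop ranges over the retry-timer channel, retrying while initConfig fails with a quorum error and breaking out on success. A minimal standalone sketch of that range-over-a-retry-channel pattern follows; the ticker and the work function are stand-ins, not MinIO APIs.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotReady = errors.New("not ready")

// retryCh emulates a simple retry timer: it emits one value per attempt
// until doneCh is closed.
func retryCh(doneCh <-chan struct{}) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		for {
			select {
			case <-doneCh:
				return
			case ch <- struct{}{}:
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()
	return ch
}

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh)

	attempts := 0
	for range retryCh(doneCh) {
		attempts++
		err := func() error { // stand-in for initConfig(objAPI)
			if attempts < 3 {
				return errNotReady // transient, keep retrying
			}
			return nil
		}()
		if err != nil {
			continue
		}
		break // success
	}
	fmt.Println("initialized after", attempts, "attempts")
}
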
|
||||
|
||||
// NewConfigSys - creates new config system object.
|
||||
@@ -251,42 +146,33 @@ func NewConfigSys() *ConfigSys {
|
||||
return &ConfigSys{}
|
||||
}
|
||||
|
||||
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
|
||||
func migrateConfigToMinioSys(objAPI ObjectLayer) error {
|
||||
defer os.Rename(getConfigFile(), getConfigFile()+".deprecated")
|
||||
|
||||
// Verify if backend already has the file.
|
||||
if err := checkServerConfig(context.Background(), objAPI); err != errConfigNotFound {
|
||||
return err
|
||||
} // if errConfigNotFound proceed to migrate..
|
||||
|
||||
var config = &serverConfig{}
|
||||
if _, err := Load(getConfigFile(), config); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
// Read from deprecate file as well if necessary.
|
||||
if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return saveServerConfig(context.Background(), objAPI, config)
|
||||
}
|
||||
|
||||
// Initialize and load config from remote etcd or local config directory
|
||||
func initConfig(objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
if globalEtcdClient != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
resp, err := globalEtcdClient.Get(ctx, getConfigFile())
|
||||
cancel()
|
||||
if err == nil && resp.Count > 0 {
|
||||
return migrateConfig()
|
||||
if err := checkConfigEtcd(context.Background(), globalEtcdClient, getConfigFile()); err != nil {
|
||||
if err == errConfigNotFound {
|
||||
// Migrates all configs at old location.
|
||||
if err = migrateConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Migrates etcd ${HOME}/.minio/config.json to '/config/config.json'
|
||||
if err = migrateConfigToMinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Watch config for changes and reloads them.
|
||||
go watchConfigEtcd(objAPI, configFile, loadConfig)
|
||||
|
||||
} else {
|
||||
if isFile(getConfigFile()) {
|
||||
if err := migrateConfig(); err != nil {
|
||||
@@ -296,25 +182,15 @@ func initConfig(objAPI ObjectLayer) error {
|
||||
// Migrates ${HOME}/.minio/config.json or config.json.deprecated
|
||||
// to '<export_path>/.minio.sys/config/config.json'
|
||||
// ignore if the file doesn't exist.
|
||||
if err := migrateConfigToMinioSys(objAPI); err != nil && !os.IsNotExist(err) {
|
||||
if err := migrateConfigToMinioSys(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkServerConfig(context.Background(), objAPI); err != nil {
|
||||
if err == errConfigNotFound {
|
||||
// Config file does not exist, we create it fresh and return upon success.
|
||||
if err = newConfig(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
|
||||
if err := migrateMinioSysConfig(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := migrateMinioSysConfig(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return loadConfig(objAPI)
|
||||
}
|
||||
|
||||
@@ -17,21 +17,22 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// Writes S3 compatible copy part range error.
|
||||
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
|
||||
func writeCopyPartErr(ctx context.Context, w http.ResponseWriter, err error, url *url.URL, browser bool) {
|
||||
switch err {
|
||||
case errInvalidRange:
|
||||
writeErrorResponse(w, ErrInvalidCopyPartRange, url)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRange), url, browser)
|
||||
return
|
||||
case errInvalidRangeSource:
|
||||
writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser)
|
||||
return
|
||||
default:
|
||||
writeErrorResponse(w, ErrInternalError, url)
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), url, browser)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -43,34 +44,6 @@ func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
|
||||
// for full details. This function treats an empty rangeString as
|
||||
// referring to the whole resource.
|
||||
//
|
||||
// In addition to parsing the range string, it also validates the
|
||||
// specified range against the given object size, so that Copy API
|
||||
// specific error can be returned.
|
||||
func parseCopyPartRange(rangeString string, resourceSize int64) (offset, length int64, err error) {
|
||||
var hrange *HTTPRangeSpec
|
||||
if rangeString != "" {
|
||||
hrange, err = parseRequestRangeSpec(rangeString)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
|
||||
// Require that both start and end are specified.
|
||||
if hrange.IsSuffixLength || hrange.Start == -1 || hrange.End == -1 {
|
||||
return -1, -1, errInvalidRange
|
||||
}
|
||||
|
||||
// Validate specified range against object size.
|
||||
if hrange.Start >= resourceSize || hrange.End >= resourceSize {
|
||||
return -1, -1, errInvalidRangeSource
|
||||
}
|
||||
}
|
||||
|
||||
return hrange.GetOffsetLength(resourceSize)
|
||||
}
|
||||
|
||||
// parseCopyPartRangeSpec transforms a range string (e.g. bytes=3-4) to HTTPRangeSpec
|
||||
// and returns errors if weird values
|
||||
func parseCopyPartRangeSpec(rangeString string) (hrange *HTTPRangeSpec, err error) {
|
||||
hrange, err = parseRequestRangeSpec(rangeString)
|
||||
if err != nil {
|
||||
|
||||
@@ -19,7 +19,7 @@ package cmd
|
||||
import "testing"
|
||||
|
||||
// Test parseCopyPartRange()
|
||||
func TestParseCopyPartRange(t *testing.T) {
|
||||
func TestParseCopyPartRangeSpec(t *testing.T) {
|
||||
// Test success cases.
|
||||
successCases := []struct {
|
||||
rangeString string
|
||||
@@ -29,16 +29,21 @@ func TestParseCopyPartRange(t *testing.T) {
|
||||
{"bytes=2-5", 2, 5},
|
||||
{"bytes=2-9", 2, 9},
|
||||
{"bytes=2-2", 2, 2},
|
||||
{"", 0, 9},
|
||||
{"bytes=0000-0006", 0, 6},
|
||||
}
|
||||
objectSize := int64(10)
|
||||
|
||||
for _, successCase := range successCases {
|
||||
start, length, err := parseCopyPartRange(successCase.rangeString, 10)
|
||||
rs, err := parseCopyPartRangeSpec(successCase.rangeString)
|
||||
if err != nil {
|
||||
t.Fatalf("expected: <nil>, got: %s", err)
|
||||
}
|
||||
|
||||
start, length, err1 := rs.GetOffsetLength(objectSize)
|
||||
if err1 != nil {
|
||||
t.Fatalf("expected: <nil>, got: %s", err1)
|
||||
}
|
||||
|
||||
if start != successCase.offsetBegin {
|
||||
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, start)
|
||||
}
|
||||
@@ -61,9 +66,11 @@ func TestParseCopyPartRange(t *testing.T) {
|
||||
"bytes=2 - 5",
|
||||
"bytes=0-0,-1",
|
||||
"bytes=2-5 ",
|
||||
"bytes=-1",
|
||||
"bytes=1-",
|
||||
}
|
||||
for _, rangeString := range invalidRangeStrings {
|
||||
if _, _, err := parseCopyPartRange(rangeString, 10); err == nil {
|
||||
if _, err := parseCopyPartRangeSpec(rangeString); err == nil {
|
||||
t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
|
||||
}
|
||||
}
|
||||
@@ -74,8 +81,14 @@ func TestParseCopyPartRange(t *testing.T) {
|
||||
"bytes=20-30",
|
||||
}
|
||||
for _, rangeString := range errorRangeString {
|
||||
if _, _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
|
||||
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
|
||||
rs, err := parseCopyPartRangeSpec(rangeString)
|
||||
if err == nil {
|
||||
err1 := checkCopyPartRangeWithSize(rs, objectSize)
|
||||
if err1 != errInvalidRangeSource {
|
||||
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("expected: %s, got: <nil>", errInvalidRangeSource)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,5 +16,6 @@ package crypto
|
||||
|
||||
// KMSConfig has the KMS config for hashicorp vault
|
||||
type KMSConfig struct {
|
||||
Vault VaultConfig `json:"vault"`
|
||||
AutoEncryption bool `json:"-"`
|
||||
Vault VaultConfig `json:"vault"`
|
||||
}
|
||||
|
||||
@@ -95,7 +95,7 @@ var ssecIsRequestedTests = []struct {
|
||||
Header http.Header
|
||||
Expected bool
|
||||
}{
|
||||
{Header: http.Header{}, Expected: false}, // 0
|
||||
{Header: http.Header{}, Expected: false}, // 0
|
||||
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
|
||||
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
|
||||
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
|
||||
@@ -137,7 +137,7 @@ var ssecCopyIsRequestedTests = []struct {
|
||||
Header http.Header
|
||||
Expected bool
|
||||
}{
|
||||
{Header: http.Header{}, Expected: false}, // 0
|
||||
{Header: http.Header{}, Expected: false}, // 0
|
||||
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
|
||||
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
|
||||
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
|
||||
|
||||
@@ -140,3 +140,37 @@ func (key ObjectKey) DerivePartKey(id uint32) (partKey [32]byte) {
|
||||
mac.Sum(partKey[:0])
|
||||
return partKey
|
||||
}
|
||||
|
||||
// SealETag seals the etag using the object key.
|
||||
// It does not encrypt empty ETags because such ETags indicate
|
||||
// that the S3 client hasn't sent an ETag = MD5(object) and
|
||||
// the backend can pick an ETag value.
|
||||
func (key ObjectKey) SealETag(etag []byte) []byte {
|
||||
if len(etag) == 0 { // don't encrypt empty ETag - only if client sent ETag = MD5(object)
|
||||
return etag
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte("SSE-etag"))
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
|
||||
}
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
||||
// UnsealETag unseals the etag using the provided object key.
|
||||
// It does not try to decrypt the ETag if len(etag) == 16
|
||||
// because such ETags indicate that the S3 client hasn't sent
|
||||
// an ETag = MD5(object) and the backend has picked an ETag value.
|
||||
func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
|
||||
if !IsETagSealed(etag) {
|
||||
return etag, nil
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte("SSE-etag"))
|
||||
if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
@@ -166,3 +166,31 @@ func TestDerivePartKey(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var sealUnsealETagTests = []string{
|
||||
"",
|
||||
"90682b8e8cc7609c",
|
||||
"90682b8e8cc7609c4671e1d64c73fc30",
|
||||
"90682b8e8cc7609c4671e1d64c73fc307fb3104f",
|
||||
}
|
||||
|
||||
func TestSealETag(t *testing.T) {
|
||||
var key ObjectKey
|
||||
for i := range key {
|
||||
key[i] = byte(i)
|
||||
}
|
||||
for i, etag := range sealUnsealETagTests {
|
||||
tag, err := hex.DecodeString(etag)
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: failed to decode etag: %s", i, err)
|
||||
}
|
||||
sealedETag := key.SealETag(tag)
|
||||
unsealedETag, err := key.UnsealETag(sealedETag)
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: failed to decrypt etag: %s", i, err)
|
||||
}
|
||||
if !bytes.Equal(unsealedETag, tag) {
|
||||
t.Errorf("Test %d: unsealed etag does not match: got %s - want %s", i, hex.EncodeToString(unsealedETag), etag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,6 +40,26 @@ func RemoveSensitiveEntries(metadata map[string]string) { // The functions is te
|
||||
delete(metadata, SSECopyKey)
|
||||
}
|
||||
|
||||
// RemoveSSEHeaders removes all crypto-specific SSE
|
||||
// header entries from the metadata map.
|
||||
func RemoveSSEHeaders(metadata map[string]string) {
|
||||
delete(metadata, SSEHeader)
|
||||
delete(metadata, SSECKeyMD5)
|
||||
delete(metadata, SSECAlgorithm)
|
||||
}
|
||||
|
||||
// RemoveInternalEntries removes all crypto-specific internal
|
||||
// metadata entries from the metadata map.
|
||||
func RemoveInternalEntries(metadata map[string]string) {
|
||||
delete(metadata, SSEMultipart)
|
||||
delete(metadata, SSEIV)
|
||||
delete(metadata, SSESealAlgorithm)
|
||||
delete(metadata, SSECSealedKey)
|
||||
delete(metadata, S3SealedKey)
|
||||
delete(metadata, S3KMSKeyID)
|
||||
delete(metadata, S3KMSSealedKey)
|
||||
}
|
||||
|
||||
// IsEncrypted returns true if the object metadata indicates
|
||||
// that it was uploaded using some form of server-side-encryption.
|
||||
//
|
||||
@@ -219,3 +239,6 @@ func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err
|
||||
copy(sealedKey.Key[:], encryptedKey)
|
||||
return sealedKey, nil
|
||||
}
|
||||
|
||||
// IsETagSealed returns true if the etag seems to be encrypted.
|
||||
func IsETagSealed(etag []byte) bool { return len(etag) > 16 }
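
A plain, unencrypted ETag is the 16-byte MD5 of the object, so anything longer is assumed to be sealed ciphertext. For example, reusing values from the tests below:

// Illustrative only.
plain, _ := hex.DecodeString("90682b8e8cc7609c4671e1d64c73fc30") // 16 bytes, a regular MD5 ETag
sealed, _ := hex.DecodeString("20000f00fba2ee2ae4845f725964eeb9e092edfabc7ab9f9239e8344341f769a51ce99b4801b0699b92b16a72fa94972")
fmt.Println(IsETagSealed(plain), IsETagSealed(sealed)) // false true
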
|
||||
|
||||
@@ -17,6 +17,7 @@ package crypto
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -364,3 +365,75 @@ func TestSSECCreateMetadata(t *testing.T) {
|
||||
}()
|
||||
_ = SSEC.CreateMetadata(nil, SealedKey{Algorithm: InsecureSealAlgorithm})
|
||||
}
|
||||
|
||||
var isETagSealedTests = []struct {
|
||||
ETag string
|
||||
IsSealed bool
|
||||
}{
|
||||
{ETag: "", IsSealed: false}, // 0
|
||||
{ETag: "90682b8e8cc7609c4671e1d64c73fc30", IsSealed: false}, // 1
|
||||
{ETag: "f201040c9dc593e39ea004dc1323699bcd", IsSealed: true}, // 2 not valid ciphertext but looks like sealed ETag
|
||||
{ETag: "20000f00fba2ee2ae4845f725964eeb9e092edfabc7ab9f9239e8344341f769a51ce99b4801b0699b92b16a72fa94972", IsSealed: true}, // 3
|
||||
}
|
||||
|
||||
func TestIsETagSealed(t *testing.T) {
|
||||
for i, test := range isETagSealedTests {
|
||||
etag, err := hex.DecodeString(test.ETag)
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: failed to decode etag: %s", i, err)
|
||||
}
|
||||
if sealed := IsETagSealed(etag); sealed != test.IsSealed {
|
||||
t.Errorf("Test %d: got %v - want %v", i, sealed, test.IsSealed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var removeInternalEntriesTests = []struct {
|
||||
Metadata, Expected map[string]string
|
||||
}{
|
||||
{ // 0
|
||||
Metadata: map[string]string{
|
||||
SSEMultipart: "",
|
||||
SSEIV: "",
|
||||
SSESealAlgorithm: "",
|
||||
SSECSealedKey: "",
|
||||
S3SealedKey: "",
|
||||
S3KMSKeyID: "",
|
||||
S3KMSSealedKey: "",
|
||||
},
|
||||
Expected: map[string]string{},
|
||||
},
|
||||
{ // 1
|
||||
Metadata: map[string]string{
|
||||
SSEMultipart: "",
|
||||
SSEIV: "",
|
||||
"X-Amz-Meta-A": "X",
|
||||
"X-Minio-Internal-B": "Y",
|
||||
},
|
||||
Expected: map[string]string{
|
||||
"X-Amz-Meta-A": "X",
|
||||
"X-Minio-Internal-B": "Y",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestRemoveInternalEntries(t *testing.T) {
|
||||
isEqual := func(x, y map[string]string) bool {
|
||||
if len(x) != len(y) {
|
||||
return false
|
||||
}
|
||||
for k, v := range x {
|
||||
if u, ok := y[k]; !ok || v != u {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
for i, test := range removeInternalEntriesTests {
|
||||
RemoveInternalEntries(test.Metadata)
|
||||
if !isEqual(test.Metadata, test.Expected) {
|
||||
t.Errorf("Test %d: got %v - want %v", i, test.Metadata, test.Expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/ioutil"
|
||||
@@ -69,10 +71,59 @@ const (
|
||||
// domain is "SSE-S3".
|
||||
func (s3) String() string { return "SSE-S3" }
|
||||
|
||||
// UnsealObjectKey extracts and decrypts the sealed object key
|
||||
// from the metadata using KMS and returns the decrypted object
|
||||
// key.
|
||||
func (sse s3) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
|
||||
keyID, kmsKey, sealedKey, err := sse.ParseMetadata(metadata)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = key.Unseal(unsealKey, sealedKey, sse.String(), bucket, object)
|
||||
return
|
||||
}
|
||||
|
||||
// String returns the SSE domain as string. For SSE-C the
|
||||
// domain is "SSE-C".
|
||||
func (ssec) String() string { return "SSE-C" }
|
||||
|
||||
// UnsealObjectKey extracts and decrypts the sealed object key
|
||||
// from the metadata using the SSE-C client key of the HTTP headers
|
||||
// and returns the decrypted object key.
|
||||
func (sse ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
|
||||
clientKey, err := sse.ParseHTTP(h)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return unsealObjectKey(clientKey, metadata, bucket, object)
|
||||
}
|
||||
|
||||
// UnsealObjectKey extracts and decrypts the sealed object key
|
||||
// from the metadata using the SSE-Copy client key of the HTTP headers
|
||||
// and returns the decrypted object key.
|
||||
func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
|
||||
clientKey, err := sse.ParseHTTP(h)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return unsealObjectKey(clientKey, metadata, bucket, object)
|
||||
}
|
||||
|
||||
// unsealObjectKey decrypts and returns the sealed object key
|
||||
// from the metadata using the SSE-C client key.
|
||||
func unsealObjectKey(clientKey [32]byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
|
||||
sealedKey, err := SSEC.ParseMetadata(metadata)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = key.Unseal(clientKey, sealedKey, SSEC.String(), bucket, object)
|
||||
return
|
||||
}
|
||||
|
||||
// EncryptSinglePart encrypts an io.Reader which must be
// the body of a single-part PUT request.
|
||||
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
|
||||
|
||||
@@ -14,7 +14,10 @@
|
||||
|
||||
package crypto
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestS3String(t *testing.T) {
|
||||
const Domain = "SSE-S3"
|
||||
@@ -29,3 +32,195 @@ func TestSSECString(t *testing.T) {
|
||||
t.Errorf("SSEC's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
|
||||
}
|
||||
}
|
||||
|
||||
var ssecUnsealObjectKeyTests = []struct {
|
||||
Headers http.Header
|
||||
Bucket, Object string
|
||||
Metadata map[string]string
|
||||
|
||||
ExpectedErr error
|
||||
}{
|
||||
{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
|
||||
Headers: http.Header{
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: nil,
|
||||
},
|
||||
{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
|
||||
Headers: http.Header{
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object2",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: ErrSecretKeyMismatch,
|
||||
},
|
||||
{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
|
||||
Headers: http.Header{
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: errMissingInternalSealAlgorithm,
|
||||
},
|
||||
{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
|
||||
Headers: http.Header{
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: ErrMissingCustomerKeyMD5,
|
||||
},
|
||||
}
|
||||
|
||||
func TestSSECUnsealObjectKey(t *testing.T) {
|
||||
for i, test := range ssecUnsealObjectKeyTests {
|
||||
if _, err := SSEC.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
|
||||
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var sseCopyUnsealObjectKeyTests = []struct {
|
||||
Headers http.Header
|
||||
Bucket, Object string
|
||||
Metadata map[string]string
|
||||
|
||||
ExpectedErr error
|
||||
}{
|
||||
{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
|
||||
Headers: http.Header{
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: nil,
|
||||
},
|
||||
{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
|
||||
Headers: http.Header{
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object2",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: ErrSecretKeyMismatch,
|
||||
},
|
||||
{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
|
||||
Headers: http.Header{
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: errMissingInternalSealAlgorithm,
|
||||
},
|
||||
{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
|
||||
Headers: http.Header{
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
},
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
|
||||
},
|
||||
ExpectedErr: ErrMissingCustomerKeyMD5,
|
||||
},
|
||||
}
|
||||
|
||||
func TestSSECopyUnsealObjectKey(t *testing.T) {
|
||||
for i, test := range sseCopyUnsealObjectKeyTests {
|
||||
if _, err := SSECopy.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
|
||||
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var s3UnsealObjectKeyTests = []struct {
|
||||
KMS KMS
|
||||
Bucket, Object string
|
||||
Metadata map[string]string
|
||||
|
||||
ExpectedErr error
|
||||
}{
|
||||
{ // 0 - Valid KMS key-ID and valid metadata entries for bucket/object
|
||||
KMS: NewKMS([32]byte{}),
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
|
||||
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
|
||||
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
|
||||
},
|
||||
ExpectedErr: nil,
|
||||
},
|
||||
{ // 1 - Valid KMS key-ID for invalid metadata entries for bucket/object
|
||||
KMS: NewKMS([32]byte{}),
|
||||
Bucket: "bucket",
|
||||
Object: "object",
|
||||
Metadata: map[string]string{
|
||||
"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
|
||||
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
|
||||
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
|
||||
},
|
||||
ExpectedErr: errMissingInternalSealAlgorithm,
|
||||
},
|
||||
}
|
||||
|
||||
func TestS3UnsealObjectKey(t *testing.T) {
|
||||
for i, test := range s3UnsealObjectKeyTests {
|
||||
if _, err := S3.UnsealObjectKey(test.KMS, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
|
||||
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,238 +19,234 @@ import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
vault "github.com/hashicorp/vault/api"
|
||||
)
|
||||
|
||||
const (
|
||||
// VaultEndpointEnv Vault endpoint environment variable
|
||||
VaultEndpointEnv = "MINIO_SSE_VAULT_ENDPOINT"
|
||||
// vaultAuthTypeEnv type of vault auth to be used
|
||||
vaultAuthTypeEnv = "MINIO_SSE_VAULT_AUTH_TYPE"
|
||||
// vaultAppRoleIDEnv Vault AppRole ID environment variable
|
||||
vaultAppRoleIDEnv = "MINIO_SSE_VAULT_APPROLE_ID"
|
||||
// vaultAppSecretIDEnv Vault AppRole Secret environment variable
|
||||
vaultAppSecretIDEnv = "MINIO_SSE_VAULT_APPROLE_SECRET"
|
||||
// vaultKeyVersionEnv Vault Key Version environment variable
|
||||
vaultKeyVersionEnv = "MINIO_SSE_VAULT_KEY_VERSION"
|
||||
// vaultKeyNameEnv Vault Encryption Key Name environment variable
|
||||
vaultKeyNameEnv = "MINIO_SSE_VAULT_KEY_NAME"
|
||||
)
|
||||
|
||||
var (
|
||||
//ErrKMSAuthLogin is raised when there is a failure authenticating to KMS
|
||||
ErrKMSAuthLogin = errors.New("Vault service did not return auth info")
|
||||
)
|
||||
|
||||
// VaultKey represents vault encryption key-ring.
|
||||
type VaultKey struct {
|
||||
Name string `json:"name"` // The name of the encryption key-ring
|
||||
Version int `json:"version"` // The key version
|
||||
}
|
||||
|
||||
// VaultAuth represents vault authentication type.
|
||||
// Currently the only supported authentication type is AppRole.
|
||||
type VaultAuth struct {
|
||||
Type string `json:"type"` // The authentication type
|
||||
AppRole VaultAppRole `json:"approle"` // The AppRole authentication credentials
|
||||
}
|
||||
|
||||
// VaultAppRole represents vault AppRole authentication credentials
|
||||
type VaultAppRole struct {
|
||||
ID string `json:"id"` // The AppRole access ID
|
||||
Secret string `json:"secret"` // The AppRole secret
|
||||
}
|
||||
|
||||
// VaultConfig represents vault configuration.
|
||||
type VaultConfig struct {
|
||||
Endpoint string `json:"endpoint"` // The vault API endpoint as URL
|
||||
CAPath string `json:"-"` // The path to PEM-encoded certificate files used for mTLS. Currently not used in config file.
|
||||
Auth VaultAuth `json:"auth"` // The vault authentication configuration
|
||||
Key VaultKey `json:"key-id"` // The named key used for key-generation / decryption.
|
||||
Namespace string `json:"-"` // The vault namespace of enterprise vault instances
|
||||
}
|
||||
|
||||
// vaultService represents a connection to a vault KMS.
|
||||
type vaultService struct {
|
||||
config *VaultConfig
|
||||
client *vault.Client
|
||||
secret *vault.Secret
|
||||
leaseDuration time.Duration
|
||||
}
|
||||
|
||||
// return transit secret engine's path for generate data key operation
|
||||
func (v *vaultService) genDataKeyEndpoint(key string) string {
|
||||
return "/transit/datakey/plaintext/" + key
|
||||
var _ KMS = (*vaultService)(nil) // compiler check that *vaultService implements KMS
|
||||
|
||||
// empty/default vault configuration used to check whether a particular configuration is empty.
|
||||
var emptyVaultConfig = VaultConfig{}
|
||||
|
||||
// IsEmpty returns true if the vault config struct is an
|
||||
// empty configuration.
|
||||
func (v *VaultConfig) IsEmpty() bool { return *v == emptyVaultConfig }
|
||||
|
||||
// Verify returns a nil error if the vault configuration
|
||||
// is valid. A valid configuration is either empty or
|
||||
// contains valid non-default values.
|
||||
func (v *VaultConfig) Verify() (err error) {
|
||||
if v.IsEmpty() {
|
||||
return // an empty configuration is valid
|
||||
}
|
||||
switch {
|
||||
case v.Endpoint == "":
|
||||
err = errors.New("crypto: missing hashicorp vault endpoint")
|
||||
case strings.ToLower(v.Auth.Type) != "approle":
|
||||
err = fmt.Errorf("crypto: invalid hashicorp vault authentication type: %s is not supported", v.Auth.Type)
|
||||
case v.Auth.AppRole.ID == "":
|
||||
err = errors.New("crypto: missing hashicorp vault AppRole ID")
|
||||
case v.Auth.AppRole.Secret == "":
|
||||
err = errors.New("crypto: missing hashicorp vault AppSecret ID")
|
||||
case v.Key.Name == "":
|
||||
err = errors.New("crypto: missing hashicorp vault key name")
|
||||
case v.Key.Version < 0:
|
||||
err = errors.New("crypto: invalid hashicorp vault key version: The key version must not be negative")
|
||||
}
|
||||
return
|
||||
}
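
For example, a fully-populated configuration along these lines passes Verify, while a partially-filled one (say, missing the AppRole secret) returns the corresponding error above. The endpoint, role and key values are placeholders.

// Placeholder values, for illustration only.
cfg := VaultConfig{
	Endpoint: "https://vault.example.com:8200",
	Auth: VaultAuth{
		Type: "approle",
		AppRole: VaultAppRole{
			ID:     "role-id",
			Secret: "role-secret",
		},
	},
	Key: VaultKey{Name: "minio-sse-key", Version: 1},
}
if err := cfg.Verify(); err != nil {
	// reject the invalid KMS configuration
}
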
|
||||
|
||||
// return transit secret engine's path for decrypt operation
|
||||
func (v *vaultService) decryptEndpoint(key string) string {
|
||||
return "/transit/decrypt/" + key
|
||||
}
|
||||
|
||||
// VaultKey represents vault encryption key-id name & version
|
||||
type VaultKey struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
}
|
||||
|
||||
// VaultAuth represents vault auth type to use. For now, AppRole is the only supported
|
||||
// auth type.
|
||||
type VaultAuth struct {
|
||||
Type string `json:"type"`
|
||||
AppRole VaultAppRole `json:"approle"`
|
||||
}
|
||||
|
||||
// VaultAppRole represents vault approle credentials
|
||||
type VaultAppRole struct {
|
||||
ID string `json:"id"`
|
||||
Secret string `json:"secret"`
|
||||
}
|
||||
|
||||
// VaultConfig holds config required to start vault service
|
||||
type VaultConfig struct {
|
||||
Endpoint string `json:"endpoint"`
|
||||
Auth VaultAuth `json:"auth"`
|
||||
Key VaultKey `json:"key-id"`
|
||||
}
|
||||
|
||||
// validate whether all required env variables needed to start vault service have
|
||||
// been set
|
||||
func validateVaultConfig(c *VaultConfig) error {
|
||||
if c.Endpoint == "" {
|
||||
return fmt.Errorf("Missing hashicorp vault endpoint - %s is empty", VaultEndpointEnv)
|
||||
// NewVault initializes Hashicorp Vault KMS by authenticating
|
||||
// to Vault with the credentials in config and gets a client
|
||||
// token for future api calls.
|
||||
func NewVault(config VaultConfig) (KMS, error) {
|
||||
if config.IsEmpty() {
|
||||
return nil, errors.New("crypto: the hashicorp vault configuration must not be empty")
|
||||
}
|
||||
if strings.ToLower(c.Auth.Type) != "approle" {
|
||||
return fmt.Errorf("Unsupported hashicorp vault auth type - %s", vaultAuthTypeEnv)
|
||||
}
|
||||
if c.Auth.AppRole.ID == "" {
|
||||
return fmt.Errorf("Missing hashicorp vault AppRole ID - %s is empty", vaultAppRoleIDEnv)
|
||||
}
|
||||
if c.Auth.AppRole.Secret == "" {
|
||||
return fmt.Errorf("Missing hashicorp vault AppSecret ID - %s is empty", vaultAppSecretIDEnv)
|
||||
}
|
||||
if c.Key.Name == "" {
|
||||
return fmt.Errorf("Invalid value set in environment variable %s", vaultKeyNameEnv)
|
||||
}
|
||||
if c.Key.Version < 0 {
|
||||
return fmt.Errorf("Invalid value set in environment variable %s", vaultKeyVersionEnv)
|
||||
if err := config.Verify(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// authenticate to vault with app role id and app role secret, and get a client access token, lease duration
|
||||
func getVaultAccessToken(client *vault.Client, appRoleID, appSecret string) (token string, duration int, err error) {
|
||||
data := map[string]interface{}{
|
||||
"role_id": appRoleID,
|
||||
"secret_id": appSecret,
|
||||
vaultCfg := vault.Config{Address: config.Endpoint}
|
||||
if err := vaultCfg.ConfigureTLS(&vault.TLSConfig{CAPath: config.CAPath}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, e := client.Logical().Write("auth/approle/login", data)
|
||||
if e != nil {
|
||||
return token, duration, e
|
||||
}
|
||||
if resp.Auth == nil {
|
||||
return token, duration, ErrKMSAuthLogin
|
||||
}
|
||||
return resp.Auth.ClientToken, resp.Auth.LeaseDuration, nil
|
||||
}
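As a standalone sketch of the same AppRole flow, assuming the `vault` import alias refers to the HashiCorp Vault API client (github.com/hashicorp/vault/api) and using placeholder endpoint, CA path and credentials:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// Standalone sketch of the AppRole login performed above; the endpoint,
// CA path, role ID and secret ID are placeholder values.
func main() {
	cfg := vault.Config{Address: "https://vault.example.com:8200"}
	// Optional custom CA, mirroring the CAPath handling in this hunk.
	if err := cfg.ConfigureTLS(&vault.TLSConfig{CAPath: "/etc/ssl/vault-ca"}); err != nil {
		log.Fatal(err)
	}
	client, err := vault.NewClient(&cfg)
	if err != nil {
		log.Fatal(err)
	}
	// AppRole login: POST role_id/secret_id to auth/approle/login.
	resp, err := client.Logical().Write("auth/approle/login", map[string]interface{}{
		"role_id":   "example-role-id",
		"secret_id": "example-secret-id",
	})
	if err != nil || resp == nil || resp.Auth == nil {
		log.Fatal("approle login failed")
	}
	client.SetToken(resp.Auth.ClientToken)
	fmt.Println("token lease (seconds):", resp.Auth.LeaseDuration)
}
```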
|
||||
|
||||
// NewVaultConfig sets KMSConfig from environment
|
||||
// variables and performs validations.
|
||||
func NewVaultConfig() (KMSConfig, error) {
|
||||
kc := KMSConfig{}
|
||||
endpoint := os.Getenv(VaultEndpointEnv)
|
||||
roleID := os.Getenv(vaultAppRoleIDEnv)
|
||||
roleSecret := os.Getenv(vaultAppSecretIDEnv)
|
||||
keyName := os.Getenv(vaultKeyNameEnv)
|
||||
keyVersion := 0
|
||||
authType := "approle"
|
||||
if versionStr := os.Getenv(vaultKeyVersionEnv); versionStr != "" {
|
||||
version, err := strconv.Atoi(versionStr)
|
||||
if err != nil {
|
||||
return kc, fmt.Errorf("Unable to parse %s value (`%s`)", vaultKeyVersionEnv, versionStr)
|
||||
}
|
||||
keyVersion = version
|
||||
}
|
||||
// return if none of the vault env variables are configured
|
||||
if (endpoint == "") && (roleID == "") && (roleSecret == "") && (keyName == "") && (keyVersion == 0) {
|
||||
return kc, nil
|
||||
}
|
||||
c := VaultConfig{
|
||||
Endpoint: endpoint,
|
||||
Auth: VaultAuth{
|
||||
Type: authType,
|
||||
AppRole: VaultAppRole{
|
||||
ID: roleID,
|
||||
Secret: roleSecret,
|
||||
},
|
||||
},
|
||||
Key: VaultKey{
|
||||
Version: keyVersion,
|
||||
Name: keyName,
|
||||
},
|
||||
}
|
||||
if err := validateVaultConfig(&c); err != nil {
|
||||
return kc, err
|
||||
}
|
||||
kc.Vault = c
|
||||
return kc, nil
|
||||
}
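The only non-trivial parsing above is the optional key version; the following standalone sketch shows the same pattern with a hypothetical variable name (the real names live in the vault*Env constants referenced by NewVaultConfig):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// Sketch of the optional-integer pattern used for the key version above.
// "EXAMPLE_VAULT_KEY_VERSION" is a made-up name, not the real constant.
func main() {
	keyVersion := 0 // default when the variable is unset
	if s := os.Getenv("EXAMPLE_VAULT_KEY_VERSION"); s != "" {
		v, err := strconv.Atoi(s)
		if err != nil {
			fmt.Printf("unable to parse EXAMPLE_VAULT_KEY_VERSION value (`%s`)\n", s)
			os.Exit(1)
		}
		keyVersion = v
	}
	fmt.Println("key version:", keyVersion)
}
```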
|
||||
|
||||
// NewVault initializes Hashicorp Vault KMS by
|
||||
// authenticating to Vault with the credentials in KMSConfig,
|
||||
// and gets a client token for future api calls.
|
||||
func NewVault(kmsConf KMSConfig) (KMS, error) {
|
||||
config := kmsConf.Vault
|
||||
c, err := vault.NewClient(&vault.Config{
|
||||
Address: config.Endpoint,
|
||||
})
|
||||
client, err := vault.NewClient(&vaultCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
accessToken, leaseDuration, err := getVaultAccessToken(c, config.Auth.AppRole.ID, config.Auth.AppRole.Secret)
|
||||
if err != nil {
|
||||
if config.Namespace != "" {
|
||||
client.SetNamespace(config.Namespace)
|
||||
}
|
||||
v := &vaultService{client: client, config: &config}
|
||||
if err := v.authenticate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// authenticate and get the access token
|
||||
c.SetToken(accessToken)
|
||||
v := vaultService{client: c, config: &config, leaseDuration: time.Duration(leaseDuration)}
|
||||
v.renewToken(c)
|
||||
return &v, nil
|
||||
v.renewToken()
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (v *vaultService) renewToken(c *vault.Client) {
|
||||
retryDelay := 1 * time.Minute
|
||||
// renewToken starts a new go-routine which renews
|
||||
// the vault authentication token periodically and re-authenticates
|
||||
// if the token renewal fails
|
||||
func (v *vaultService) renewToken() {
|
||||
retryDelay := v.leaseDuration / 2
|
||||
go func() {
|
||||
for {
|
||||
s, err := c.Auth().Token().RenewSelf(int(v.leaseDuration))
|
||||
if err != nil {
|
||||
if v.secret == nil {
|
||||
if err := v.authenticate(); err != nil {
|
||||
time.Sleep(retryDelay)
|
||||
continue
|
||||
}
|
||||
}
|
||||
s, err := v.client.Auth().Token().RenewSelf(int(v.leaseDuration))
|
||||
if err != nil || s == nil {
|
||||
v.secret = nil
|
||||
time.Sleep(retryDelay)
|
||||
continue
|
||||
}
|
||||
nextRenew := s.Auth.LeaseDuration / 2
|
||||
time.Sleep(time.Duration(nextRenew) * time.Second)
|
||||
if ok, err := s.TokenIsRenewable(); !ok || err != nil {
|
||||
v.secret = nil
|
||||
continue
|
||||
}
|
||||
ttl, err := s.TokenTTL()
|
||||
if err != nil {
|
||||
v.secret = nil
|
||||
continue
|
||||
}
|
||||
v.secret = s
|
||||
retryDelay = ttl / 2
|
||||
time.Sleep(retryDelay)
|
||||
}
|
||||
}()
|
||||
}
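Stripped of the Vault specifics, the new renewToken boils down to a renew-or-re-login loop; a generic sketch of that shape (renew and login are stand-in callbacks, `time` assumed imported):

```go
// Generic sketch of the renew-or-re-login loop implemented by renewToken above.
// renew and login are stand-in callbacks, not the real Vault client calls.
func startRenewer(ttl time.Duration, renew func() (time.Duration, error), login func() error) {
	go func() {
		retryDelay := ttl / 2
		for {
			time.Sleep(retryDelay)
			newTTL, err := renew()
			if err != nil {
				// renewal failed: try a fresh login, then retry after the same delay
				_ = login()
				continue
			}
			// renewing halfway through the remaining lease keeps the token alive
			retryDelay = newTTL / 2
		}
	}()
}
```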
|
||||
|
||||
// Generates a random plain text key, sealed plain text key from
|
||||
// Vault. It returns the plaintext key and sealed plaintext key on success
|
||||
// authenticate logs the app to vault, and starts the auto renewer
|
||||
// before secret expires
|
||||
func (v *vaultService) authenticate() (err error) {
|
||||
payload := map[string]interface{}{
|
||||
"role_id": v.config.Auth.AppRole.ID,
|
||||
"secret_id": v.config.Auth.AppRole.Secret,
|
||||
}
|
||||
var tokenID string
|
||||
var ttl time.Duration
|
||||
var secret *vault.Secret
|
||||
secret, err = v.client.Logical().Write("auth/approle/login", payload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if secret == nil {
|
||||
err = ErrKMSAuthLogin
|
||||
return
|
||||
}
|
||||
|
||||
tokenID, err = secret.TokenID()
|
||||
if err != nil {
|
||||
err = ErrKMSAuthLogin
|
||||
return
|
||||
}
|
||||
ttl, err = secret.TokenTTL()
|
||||
if err != nil {
|
||||
err = ErrKMSAuthLogin
|
||||
return
|
||||
}
|
||||
v.client.SetToken(tokenID)
|
||||
v.secret = secret
|
||||
v.leaseDuration = ttl
|
||||
return
|
||||
}
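authenticate and renewToken lean on the token helpers of *vault.Secret; a small sketch of those calls, assuming `vault` aliases the HashiCorp API client package:

```go
// Sketch of the *vault.Secret helpers used by authenticate and renewToken,
// assuming vault is github.com/hashicorp/vault/api.
func tokenDetails(secret *vault.Secret) (id string, ttl time.Duration, renewable bool, err error) {
	if id, err = secret.TokenID(); err != nil {
		return
	}
	if ttl, err = secret.TokenTTL(); err != nil {
		return
	}
	renewable, err = secret.TokenIsRenewable()
	return
}
```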
|
||||
|
||||
// GenerateKey returns a new plaintext key, generated by the KMS,
|
||||
// and a sealed version of this plaintext key encrypted using the
|
||||
// named key referenced by keyID. It also binds the generated key
|
||||
// cryptographically to the provided context.
|
||||
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
|
||||
contextStream := new(bytes.Buffer)
|
||||
ctx.WriteTo(contextStream)
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
}
|
||||
s, err1 := v.client.Logical().Write(v.genDataKeyEndpoint(keyID), payload)
|
||||
|
||||
if err1 != nil {
|
||||
return key, sealedKey, err1
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
|
||||
if err != nil {
|
||||
return key, sealedKey, err
|
||||
}
|
||||
sealKey := s.Data["ciphertext"].(string)
|
||||
plainKey, err := base64.StdEncoding.DecodeString(s.Data["plaintext"].(string))
|
||||
if err != nil {
|
||||
return key, sealedKey, err1
|
||||
return key, sealedKey, err
|
||||
}
|
||||
copy(key[:], []byte(plainKey))
|
||||
return key, []byte(sealKey), nil
|
||||
}
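The transit datakey response carries the plaintext key base64-encoded next to its ciphertext; a sketch of unpacking such a response map (a stand-in for (*vault.Secret).Data, with errors and encoding/base64 assumed imported):

```go
// Sketch of unpacking a transit "datakey" response as GenerateKey does above;
// the data map stands in for the Secret.Data returned by the Vault client.
func unpackDataKey(data map[string]interface{}) (key [32]byte, sealedKey []byte, err error) {
	sealed, ok := data["ciphertext"].(string)
	if !ok {
		return key, nil, errors.New("transit response missing ciphertext")
	}
	plain, ok := data["plaintext"].(string)
	if !ok {
		return key, nil, errors.New("transit response missing plaintext")
	}
	plainKey, err := base64.StdEncoding.DecodeString(plain) // plaintext data key is base64-encoded
	if err != nil {
		return key, nil, err
	}
	copy(key[:], plainKey)
	return key, []byte(sealed), nil
}
```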
|
||||
|
||||
// unsealKMSKey unseals the sealedKey using the Vault master key
|
||||
// referenced by the keyID. The plain text key is returned on success.
|
||||
// UnsealKey returns the decrypted sealedKey as plaintext key.
|
||||
// Therefore it sends the sealedKey to the KMS which decrypts
|
||||
// it using the named key referenced by keyID and responses with
|
||||
// the plaintext key.
|
||||
//
|
||||
// The context must be same context as the one provided while
|
||||
// generating the plaintext key / sealedKey.
|
||||
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
|
||||
contextStream := new(bytes.Buffer)
|
||||
ctx.WriteTo(contextStream)
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"ciphertext": string(sealedKey),
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
}
|
||||
s, err1 := v.client.Logical().Write(v.decryptEndpoint(keyID), payload)
|
||||
if err1 != nil {
|
||||
return key, err1
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
|
||||
if err != nil {
|
||||
return key, err
|
||||
}
|
||||
base64Key := s.Data["plaintext"].(string)
|
||||
plainKey, err1 := base64.StdEncoding.DecodeString(base64Key)
|
||||
if err1 != nil {
|
||||
return key, err1
|
||||
plainKey, err := base64.StdEncoding.DecodeString(base64Key)
|
||||
if err != nil {
|
||||
return key, err
|
||||
}
|
||||
copy(key[:], []byte(plainKey))
|
||||
|
||||
return key, nil
|
||||
}
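Taken together, GenerateKey and UnsealKey form a seal/unseal round trip; an illustrative sketch against the KMS interface, assuming Context is the map-style context type used above and using placeholder key and context values:

```go
// Illustrative round trip over the KMS interface implemented by vaultService;
// the key name and context entries are placeholders.
func exampleRoundTrip(kms KMS) error {
	kctx := Context{"bucket": "photos", "object": "2019/image.jpg"}
	plainKey, sealedKey, err := kms.GenerateKey("minio-sse-key", kctx)
	if err != nil {
		return err
	}
	// sealedKey is what gets persisted; the plaintext key is used and discarded.
	unsealed, err := kms.UnsealKey("minio-sse-key", sealedKey, kctx)
	if err != nil {
		return err
	}
	if unsealed != plainKey {
		return errors.New("unsealed key does not match the generated key")
	}
	return nil
}
```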
|
||||
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/disk"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/lock"
|
||||
)
|
||||
|
||||
@@ -92,7 +91,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
|
||||
appendFileMap: make(map[string]*fsAppendFile),
|
||||
}
|
||||
|
||||
go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
|
||||
go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
|
||||
|
||||
cacheFS := cacheFSObjects{
|
||||
FSObjects: fsObjects,
|
||||
@@ -159,7 +158,7 @@ func (cfs *cacheFSObjects) purgeTrash() {
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-globalServiceDoneCh:
|
||||
case <-GlobalServiceDoneCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
|
||||
@@ -258,7 +257,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
|
||||
}
|
||||
|
||||
// Caches the object to disk
|
||||
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) error {
|
||||
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) error {
|
||||
if cfs.diskUsageHigh() {
|
||||
select {
|
||||
case cfs.purgeChan <- struct{}{}:
|
||||
@@ -275,7 +274,7 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
|
||||
return pErr
|
||||
}
|
||||
}
|
||||
_, err := cfs.PutObject(ctx, bucket, object, data, metadata, opts)
|
||||
_, err := cfs.PutObject(ctx, bucket, object, data, opts)
|
||||
// if err is due to disk being offline , mark cache drive as offline
|
||||
if IsErr(err, baseErrs...) {
|
||||
cfs.setOnline(false)
|
||||
@@ -301,7 +300,8 @@ func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bo
|
||||
|
||||
// Identical to fs PutObject operation except that it uses ETag in metadata
|
||||
// headers.
|
||||
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
|
||||
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
|
||||
data := r.Reader
|
||||
fs := cfs.FSObjects
|
||||
// Lock the object.
|
||||
objectLock := fs.nsMutex.NewNSLock(bucket, object)
|
||||
@@ -312,7 +312,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
|
||||
|
||||
// No metadata is set, allocate a new one.
|
||||
meta := make(map[string]string)
|
||||
for k, v := range metadata {
|
||||
for k, v := range opts.UserDefined {
|
||||
meta[k] = v
|
||||
}
|
||||
|
||||
@@ -354,7 +354,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
|
||||
}
|
||||
|
||||
// Validate input data size and it can never be less than zero.
|
||||
if data.Size() < 0 {
|
||||
if data.Size() < -1 {
|
||||
logger.LogIf(ctx, errInvalidArgument)
|
||||
return ObjectInfo{}, errInvalidArgument
|
||||
}
|
||||
@@ -438,7 +438,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
|
||||
// Implements S3 compatible initiate multipart API. Operation here is identical
|
||||
// to fs backend implementation - with the exception that cache FS uses the uploadID
|
||||
// generated on the backend
|
||||
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string, opts ObjectOptions) (string, error) {
|
||||
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, uploadID string, opts ObjectOptions) (string, error) {
|
||||
if cfs.diskUsageHigh() {
|
||||
select {
|
||||
case cfs.purgeChan <- struct{}{}:
|
||||
@@ -472,7 +472,7 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
|
||||
|
||||
// Initialize fs.json values.
|
||||
fsMeta := newFSMetaV1()
|
||||
fsMeta.Meta = meta
|
||||
fsMeta.Meta = opts.UserDefined
|
||||
|
||||
fsMetaBytes, err := json.Marshal(fsMeta)
|
||||
if err != nil {
|
||||
|
||||
@@ -38,8 +38,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// disk cache needs to have cacheSizeMultiplier * object size space free for a cache entry to be created.
|
||||
cacheSizeMultiplier = 100
|
||||
// disk cache needs to have object size space free for a cache entry to be created.
|
||||
cacheTrashDir = "trash"
|
||||
cacheCleanupInterval = 10 // in minutes
|
||||
)
|
||||
@@ -58,19 +57,19 @@ type cacheObjects struct {
|
||||
// file path patterns to exclude from cache
|
||||
exclude []string
|
||||
// Object functions pointing to the corresponding functions of backend implementation.
|
||||
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType) (gr *GetObjectReader, err error)
|
||||
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
|
||||
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
|
||||
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
DeleteObjectFn func(ctx context.Context, bucket, object string) error
|
||||
ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
|
||||
ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
|
||||
ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
|
||||
GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
|
||||
NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
|
||||
PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
|
||||
NewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
|
||||
PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
|
||||
AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
|
||||
CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
|
||||
CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
DeleteBucketFn func(ctx context.Context, bucket string) error
|
||||
}
|
||||
|
||||
@@ -90,17 +89,17 @@ type CacheObjectLayer interface {
|
||||
ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
|
||||
DeleteBucket(ctx context.Context, bucket string) error
|
||||
// Object operations.
|
||||
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType) (gr *GetObjectReader, err error)
|
||||
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
|
||||
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
|
||||
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
DeleteObject(ctx context.Context, bucket, object string) error
|
||||
|
||||
// Multipart operations.
|
||||
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
|
||||
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
|
||||
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
|
||||
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
|
||||
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
|
||||
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
|
||||
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
|
||||
// Storage operations.
|
||||
StorageInfo(ctx context.Context) CacheStorageInfo
|
||||
@@ -183,44 +182,40 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
|
||||
return metadata
|
||||
}
|
||||
|
||||
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType) (gr *GetObjectReader, err error) {
|
||||
|
||||
bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock)
|
||||
|
||||
if c.isCacheExclude(bucket, object) || !bkReader.ObjInfo.IsCacheable() {
|
||||
return bkReader, bkErr
|
||||
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
if c.isCacheExclude(bucket, object) {
|
||||
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
|
||||
}
|
||||
|
||||
// fetch cacheFSObjects if object is currently cached or nearest available cache drive
|
||||
dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return bkReader, bkErr
|
||||
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
|
||||
}
|
||||
|
||||
backendDown := backendDownError(bkErr)
|
||||
if bkErr != nil && !backendDown {
|
||||
cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
|
||||
|
||||
objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
|
||||
if backendDownError(err) && cacheErr == nil {
|
||||
return cacheReader, nil
|
||||
} else if err != nil {
|
||||
if _, ok := err.(ObjectNotFound); ok {
|
||||
// Delete the cached entry if backend object was deleted.
|
||||
// Delete cached entry if backend object was deleted.
|
||||
dcache.Delete(ctx, bucket, object)
|
||||
}
|
||||
return nil, bkErr
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !backendDown && filterFromCache(bkReader.ObjInfo.UserDefined) {
|
||||
return bkReader, bkErr
|
||||
if !objInfo.IsCacheable() || filterFromCache(objInfo.UserDefined) {
|
||||
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
|
||||
}
|
||||
|
||||
if cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType); cacheErr == nil {
|
||||
if backendDown {
|
||||
// If the backend is down, serve the request from cache.
|
||||
return cacheReader, nil
|
||||
}
|
||||
|
||||
if cacheReader.ObjInfo.ETag == bkReader.ObjInfo.ETag && !isStaleCache(bkReader.ObjInfo) {
|
||||
if cacheErr == nil {
|
||||
if cacheReader.ObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
|
||||
// Object is not stale, so serve from cache
|
||||
return cacheReader, nil
|
||||
}
|
||||
|
||||
cacheReader.Close()
|
||||
// Object is stale, so delete from cache
|
||||
dcache.Delete(ctx, bucket, object)
|
||||
}
|
||||
@@ -230,13 +225,13 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
|
||||
|
||||
if rs != nil {
|
||||
// We don't cache partial objects.
|
||||
return bkReader, bkErr
|
||||
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
|
||||
}
|
||||
if !dcache.diskAvailable(bkReader.ObjInfo.Size * cacheSizeMultiplier) {
|
||||
// cache only objects < 1/100th of disk capacity
|
||||
return bkReader, bkErr
|
||||
if !dcache.diskAvailable(objInfo.Size) {
|
||||
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
|
||||
}
|
||||
|
||||
bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
|
||||
if bkErr != nil {
|
||||
return nil, bkErr
|
||||
}
|
||||
@@ -244,15 +239,14 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
|
||||
// Initialize pipe.
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
teeReader := io.TeeReader(bkReader, pipeWriter)
|
||||
hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "")
|
||||
hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
|
||||
if herr != nil {
|
||||
bkReader.Close()
|
||||
return nil, herr
|
||||
}
|
||||
|
||||
go func() {
|
||||
opts := ObjectOptions{}
|
||||
putErr := dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(bkReader.ObjInfo), opts)
|
||||
putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: c.getMetadata(bkReader.ObjInfo)})
|
||||
// close the write end of the pipe, so the error gets
|
||||
// propagated to getObjReader
|
||||
pipeWriter.CloseWithError(putErr)
|
||||
@@ -260,8 +254,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
|
||||
|
||||
cleanupBackend := func() { bkReader.Close() }
|
||||
cleanupPipe := func() { pipeReader.Close() }
|
||||
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
|
||||
return gr, nil
|
||||
return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
|
||||
}
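The new GetObjectNInfo fills the cache as a side effect of serving the backend stream; the core trick is io.TeeReader feeding an io.Pipe whose read end is drained by the cache writer. A standalone sketch of just that pattern:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// Standalone sketch of the write-through pattern used above: the backend
// stream is tee'd into a pipe so the "cache" fills while the caller reads.
func main() {
	backend := strings.NewReader("object payload from the backend")

	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(backend, pipeWriter)

	cache := new(bytes.Buffer)
	done := make(chan error, 1)
	go func() {
		// stand-in for dcache.Put: drain the pipe into the cache copy
		_, err := io.Copy(cache, pipeReader)
		done <- err
	}()

	// the caller reads the tee'd stream exactly as it would read the backend
	payload, err := ioutil.ReadAll(teeReader)
	pipeWriter.CloseWithError(err) // propagate any read error to the cache writer
	if err != nil {
		panic(err)
	}
	if cerr := <-done; cerr != nil {
		panic(cerr)
	}
	fmt.Printf("served %d bytes, cached %d bytes\n", len(payload), cache.Len())
}
```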
|
||||
|
||||
// Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also
|
||||
@@ -289,6 +282,10 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
|
||||
return err
|
||||
}
|
||||
|
||||
if !backendDown && !objInfo.IsCacheable() {
|
||||
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||
}
|
||||
|
||||
if !backendDown && filterFromCache(objInfo.UserDefined) {
|
||||
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||
}
|
||||
@@ -304,28 +301,26 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
|
||||
}
|
||||
dcache.Delete(ctx, bucket, object)
|
||||
}
|
||||
if startOffset != 0 || length != objInfo.Size {
|
||||
if startOffset != 0 || (length > 0 && length != objInfo.Size) {
|
||||
// We don't cache partial objects.
|
||||
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||
}
|
||||
if !dcache.diskAvailable(objInfo.Size * cacheSizeMultiplier) {
|
||||
// cache only objects < 1/100th of disk capacity
|
||||
if !dcache.diskAvailable(objInfo.Size) {
|
||||
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||
}
|
||||
// Initialize pipe.
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "")
|
||||
hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts); err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
|
||||
gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
|
||||
pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
|
||||
}()
|
||||
err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo), opts)
|
||||
|
||||
opts.UserDefined = c.getMetadata(objInfo)
|
||||
err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -649,42 +644,43 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
|
||||
}
|
||||
|
||||
// PutObject - caches the uploaded object for single Put operations
|
||||
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
putObjectFn := c.PutObjectFn
|
||||
data := r.Reader
|
||||
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
|
||||
if err != nil {
|
||||
// disk cache could not be located, execute backend call.
|
||||
return putObjectFn(ctx, bucket, object, r, metadata, opts)
|
||||
return putObjectFn(ctx, bucket, object, r, opts)
|
||||
}
|
||||
size := r.Size()
|
||||
|
||||
// fetch from backend if there is no space on cache drive
|
||||
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
|
||||
return putObjectFn(ctx, bucket, object, r, metadata, opts)
|
||||
if !dcache.diskAvailable(size) {
|
||||
return putObjectFn(ctx, bucket, object, r, opts)
|
||||
}
|
||||
// fetch from backend if cache exclude pattern or cache-control
|
||||
// directive set to exclude
|
||||
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
|
||||
if c.isCacheExclude(bucket, object) || filterFromCache(opts.UserDefined) {
|
||||
dcache.Delete(ctx, bucket, object)
|
||||
return putObjectFn(ctx, bucket, object, r, metadata, opts)
|
||||
return putObjectFn(ctx, bucket, object, r, opts)
|
||||
}
|
||||
objInfo = ObjectInfo{}
|
||||
// Initialize pipe to stream data to backend
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
hashReader, err := hash.NewReader(pipeReader, size, r.MD5HexString(), r.SHA256HexString())
|
||||
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
// Initialize pipe to stream data to cache
|
||||
rPipe, wPipe := io.Pipe()
|
||||
cHashReader, err := hash.NewReader(rPipe, size, r.MD5HexString(), r.SHA256HexString())
|
||||
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
oinfoCh := make(chan ObjectInfo)
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata, opts)
|
||||
oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
|
||||
if perr != nil {
|
||||
pipeWriter.CloseWithError(perr)
|
||||
wPipe.CloseWithError(perr)
|
||||
@@ -697,14 +693,14 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
|
||||
}()
|
||||
|
||||
go func() {
|
||||
if err = dcache.Put(ctx, bucket, object, cHashReader, metadata, opts); err != nil {
|
||||
if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), opts); err != nil {
|
||||
wPipe.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
mwriter := io.MultiWriter(pipeWriter, wPipe)
|
||||
_, err = io.Copy(mwriter, r)
|
||||
_, err = io.Copy(mwriter, data)
|
||||
if err != nil {
|
||||
err = <-errCh
|
||||
return objInfo, err
|
||||
@@ -716,68 +712,69 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
|
||||
}
|
||||
|
||||
// NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
|
||||
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
|
||||
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
|
||||
newMultipartUploadFn := c.NewMultipartUploadFn
|
||||
|
||||
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
|
||||
return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
|
||||
if c.isCacheExclude(bucket, object) || filterFromCache(opts.UserDefined) {
|
||||
return newMultipartUploadFn(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
|
||||
if err != nil {
|
||||
// disk cache could not be located, execute backend call.
|
||||
return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
|
||||
return newMultipartUploadFn(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata, opts)
|
||||
uploadID, err = newMultipartUploadFn(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// create new multipart upload in cache with same uploadID
|
||||
dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID, opts)
|
||||
dcache.NewMultipartUpload(ctx, bucket, object, uploadID, opts)
|
||||
return uploadID, err
|
||||
}
|
||||
|
||||
// PutObjectPart - uploads part to backend and cache simultaneously.
|
||||
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
|
||||
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
|
||||
data := r.Reader
|
||||
putObjectPartFn := c.PutObjectPartFn
|
||||
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
|
||||
if err != nil {
|
||||
// disk cache could not be located, execute backend call.
|
||||
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
|
||||
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
|
||||
}
|
||||
|
||||
if c.isCacheExclude(bucket, object) {
|
||||
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
|
||||
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
|
||||
}
|
||||
|
||||
// make sure cache has at least cacheSizeMultiplier * size available
|
||||
// make sure cache has at least size space available
|
||||
size := data.Size()
|
||||
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
|
||||
if !dcache.diskAvailable(size) {
|
||||
select {
|
||||
case dcache.purgeChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
|
||||
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
|
||||
}
|
||||
|
||||
info = PartInfo{}
|
||||
// Initialize pipe to stream data to backend
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString())
|
||||
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Initialize pipe to stream data to cache
|
||||
rPipe, wPipe := io.Pipe()
|
||||
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString())
|
||||
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pinfoCh := make(chan PartInfo)
|
||||
errorCh := make(chan error)
|
||||
go func() {
|
||||
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader, opts)
|
||||
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
|
||||
if err != nil {
|
||||
close(pinfoCh)
|
||||
pipeWriter.CloseWithError(err)
|
||||
@@ -789,7 +786,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
|
||||
pinfoCh <- info
|
||||
}()
|
||||
go func() {
|
||||
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader, opts); perr != nil {
|
||||
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
|
||||
wPipe.CloseWithError(perr)
|
||||
return
|
||||
}
|
||||
@@ -831,25 +828,25 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
|
||||
}
|
||||
|
||||
// CompleteMultipartUpload - completes multipart upload operation on backend and cache.
|
||||
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
|
||||
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
completeMultipartUploadFn := c.CompleteMultipartUploadFn
|
||||
|
||||
if c.isCacheExclude(bucket, object) {
|
||||
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
|
||||
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
|
||||
}
|
||||
|
||||
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
|
||||
if err != nil {
|
||||
// disk cache could not be located, execute backend call.
|
||||
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
|
||||
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
|
||||
}
|
||||
// perform backend operation
|
||||
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
|
||||
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// create new multipart upload in cache with same uploadID
|
||||
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
|
||||
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -971,8 +968,11 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
|
||||
GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
|
||||
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
|
||||
},
|
||||
PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
|
||||
GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
|
||||
},
|
||||
PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
|
||||
},
|
||||
DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
|
||||
return newObjectLayerFn().DeleteObject(ctx, bucket, object)
|
||||
@@ -989,17 +989,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
|
||||
GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
|
||||
return newObjectLayerFn().GetBucketInfo(ctx, bucket)
|
||||
},
|
||||
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
|
||||
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
|
||||
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
|
||||
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
|
||||
},
|
||||
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
|
||||
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
|
||||
return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
|
||||
},
|
||||
AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
|
||||
return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
|
||||
},
|
||||
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
|
||||
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
|
||||
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
|
||||
},
|
||||
DeleteBucketFn: func(ctx context.Context, bucket string) error {
|
||||
return newObjectLayerFn().DeleteBucket(ctx, bucket)
|
||||
|
||||
@@ -134,7 +134,7 @@ func TestCacheExclusion(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cobj := cobjects.(*cacheObjects)
|
||||
globalServiceDoneCh <- struct{}{}
|
||||
GlobalServiceDoneCh <- struct{}{}
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
objectName string
|
||||
@@ -192,14 +192,13 @@ func TestDiskCache(t *testing.T) {
|
||||
objInfo.ContentType = contentType
|
||||
objInfo.ETag = etag
|
||||
objInfo.UserDefined = httpMeta
|
||||
opts := ObjectOptions{}
|
||||
|
||||
var opts ObjectOptions
|
||||
byteReader := bytes.NewReader([]byte(content))
|
||||
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
|
||||
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
|
||||
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -270,17 +269,17 @@ func TestDiskCacheMaxUse(t *testing.T) {
|
||||
opts := ObjectOptions{}
|
||||
|
||||
byteReader := bytes.NewReader([]byte(content))
|
||||
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
|
||||
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !cache.diskAvailable(int64(size)) {
|
||||
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
|
||||
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
|
||||
if err != errDiskFull {
|
||||
t.Fatal("Cache max-use limit violated.")
|
||||
}
|
||||
} else {
|
||||
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
|
||||
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
234	cmd/dummy-handlers.go	Normal file
@@ -0,0 +1,234 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2018, 2019 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
|
||||
// Data types used for returning dummy tagging XML.
|
||||
// These variables shouldn't be used elsewhere.
|
||||
// They are only defined to be used in this file alone.
|
||||
|
||||
type tagging struct {
|
||||
XMLName xml.Name `xml:"Tagging"`
|
||||
TagSet tagSet `xml:"TagSet"`
|
||||
}
|
||||
|
||||
type tagSet struct {
|
||||
Tag []tagElem `xml:"Tag"`
|
||||
}
|
||||
|
||||
type tagElem struct {
|
||||
Key string `xml:"Key"`
|
||||
Value string `xml:"Value"`
|
||||
}
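These dummy handlers only ever emit an empty tag set; a standalone sketch of the XML they produce (types redeclared here purely for illustration):

```go
package main

import (
	"encoding/xml"
	"os"
)

// Redeclared copies of the unexported types above, for illustration only.
type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  tagSet   `xml:"TagSet"`
}

type tagSet struct {
	Tag []tagElem `xml:"Tag"`
}

type tagElem struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

func main() {
	// Like GetBucketTaggingHandler below, return a single empty Tag element.
	tags := &tagging{}
	tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})
	// Produces: <Tagging><TagSet><Tag><Key></Key><Value></Value></Tag></TagSet></Tagging>
	_ = xml.NewEncoder(os.Stdout).Encode(tags)
}
```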
|
||||
|
||||
// GetBucketWebsite - GET bucket website, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketVersioning - GET bucket versioning, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketAccelerate - GET bucket accelerate, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketRequestPaymentHandler - GET bucket requestPayment, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketLoggingHandler - GET bucket logging, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketLifecycleHandler - GET bucket lifecycle, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketReplicationHandler - GET bucket replication, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// DeleteBucketTaggingHandler - DELETE bucket tagging, a dummy api
|
||||
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// DeleteBucketWebsiteHandler - DELETE bucket website, a dummy api
|
||||
func (api objectAPIHandlers) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
type allowedMethod string
|
||||
|
||||
// Define strings
|
||||
const (
|
||||
GET allowedMethod = http.MethodGet
|
||||
PUT allowedMethod = http.MethodPut
|
||||
HEAD allowedMethod = http.MethodHead
|
||||
POST allowedMethod = http.MethodPost
|
||||
DELETE allowedMethod = http.MethodDelete
|
||||
)
|
||||
|
||||
// GetBucketCorsHandler - GET bucket cors, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketCorsHandler")
|
||||
|
||||
type corsRule struct {
|
||||
AllowedHeaders []string `xml:"AllowedHeaders"`
|
||||
AllowedMethods []allowedMethod `xml:"AllowedMethod"`
|
||||
AllowedOrigins []string `xml:"AllowedOrigin"`
|
||||
ExposeHeaders []string `xml:"ExposeHeader"`
|
||||
MaxAgeSeconds int64 `xml:"MaxAgeSeconds"`
|
||||
}
|
||||
|
||||
type corsConfiguration struct {
|
||||
XMLName xml.Name `xml:"CORSConfiguration"`
|
||||
CorsRule []corsRule `xml:"CORSRule"`
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getBucketCors if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if bucket exists, before proceeding further...
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
cors := &corsConfiguration{}
|
||||
if err := xml.NewEncoder(w).Encode(cors); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketTaggingHandler - GET bucket tagging, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketTagging")
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getBucketTagging if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if bucket exists, before proceeding further...
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
tags := &tagging{}
|
||||
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})
|
||||
|
||||
if err := xml.NewEncoder(w).Encode(tags); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetObjectTaggingHandler - GET object tagging, a dummy api
|
||||
func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetObjectTagging")
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getObjectTagging if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if object exists, before proceeding further...
|
||||
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
tags := &tagging{}
|
||||
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})
|
||||
|
||||
if err := xml.NewEncoder(w).Encode(tags); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
@@ -1,161 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
|
||||
type DummyObjectLayer struct{}
|
||||
|
||||
func (api *DummyObjectLayer) Shutdown(context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (api *DummyObjectLayer) StorageInfo(context.Context) (si StorageInfo) {
|
||||
return
|
||||
}
|
||||
|
||||
func (api *DummyObjectLayer) MakeBucketWithLocation(ctx context.Context, bucket string, location string) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (api *DummyObjectLayer) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
	return
}

func (api *DummyObjectLayer) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	return
}

func (api *DummyObjectLayer) DeleteBucket(ctx context.Context, bucket string) (err error) {
	return
}

func (api *DummyObjectLayer) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	return
}

func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	return
}

func (api *DummyObjectLayer) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lock LockType) (gr *GetObjectReader, err error) {
	return
}

func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
	return
}

func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return
}

func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return
}

func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	return
}

func (api *DummyObjectLayer) DeleteObject(ctx context.Context, bucket, object string) (err error) {
	return
}

func (api *DummyObjectLayer) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	return
}

func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
	return
}

func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error) {
	return
}

func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
	return
}

func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
	return
}

func (api *DummyObjectLayer) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
	return
}

func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
	return
}

func (api *DummyObjectLayer) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
	return
}

func (api *DummyObjectLayer) HealFormat(ctx context.Context, dryRun bool) (item madmin.HealResultItem, err error) {
	return
}

func (api *DummyObjectLayer) HealBucket(ctx context.Context, bucket string, dryRun bool) (items []madmin.HealResultItem, err error) {
	return
}

func (api *DummyObjectLayer) HealObject(ctx context.Context, bucket, object string, dryRun bool) (item madmin.HealResultItem, err error) {
	return
}

func (api *DummyObjectLayer) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
	return
}

func (api *DummyObjectLayer) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (info ListObjectsInfo, err error) {
	return
}

func (api *DummyObjectLayer) SetBucketPolicy(context.Context, string, *policy.Policy) (err error) {
	return
}

func (api *DummyObjectLayer) GetBucketPolicy(context.Context, string) (bucketPolicy *policy.Policy, err error) {
	return
}

func (api *DummyObjectLayer) RefreshBucketPolicy(context.Context, string) (err error) {
	return
}

func (api *DummyObjectLayer) DeleteBucketPolicy(context.Context, string) (err error) {
	return
}

func (api *DummyObjectLayer) IsNotificationSupported() (b bool) {
	return
}

func (api *DummyObjectLayer) IsEncryptionSupported() (b bool) {
	return
}

@@ -22,12 +22,14 @@ import (
	"crypto/rand"
	"crypto/subtle"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"io"
	"net/http"
	"path"
	"strconv"

	"github.com/minio/minio-go/pkg/encrypt"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/ioutil"
@@ -37,7 +39,6 @@ import (

var (
	// AWS errors for invalid SSE-C requests.
	errInsecureSSERequest   = errors.New("SSE-C requests require TLS connections")
	errEncryptedObject      = errors.New("The object was stored using a form of SSE")
	errInvalidSSEParameters = errors.New("The SSE-C key for key-rotation is not correct") // special access denied
	errKMSNotConfigured     = errors.New("KMS not configured for a server side encrypted object")
@@ -55,11 +56,11 @@ const (
	// SSEIVSize is the size of the IV data
	SSEIVSize = 32 // 32 bytes

	// SSE dare package block size.
	sseDAREPackageBlockSize = 64 * 1024 // 64KiB bytes
	// SSEDAREPackageBlockSize - SSE dare package block size.
	SSEDAREPackageBlockSize = 64 * 1024 // 64KiB bytes

	// SSE dare package meta padding bytes.
	sseDAREPackageMetaSize = 32 // 32 bytes
	// SSEDAREPackageMetaSize - SSE dare package meta padding bytes.
	SSEDAREPackageMetaSize = 32 // 32 bytes

)
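A quick illustration of the exported DARE constants above (a sketch added for this review, not part of the changeset): every 64 KiB of plaintext becomes one package carrying 32 bytes of overhead, so the encrypted size of n plaintext bytes can be estimated as below. The helper name is hypothetical; it only roughly mirrors what ObjectInfo.EncryptedSize computes.

// Sketch only: estimate the on-disk (encrypted) size of n plaintext bytes
// from the exported package constants.
func estimatedEncryptedSize(n int64) int64 {
	size := (n / SSEDAREPackageBlockSize) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
	if r := n % SSEDAREPackageBlockSize; r > 0 {
		size += r + SSEDAREPackageMetaSize // the final, shorter package still pays the 32-byte overhead
	}
	return size
}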
@@ -80,16 +81,31 @@ func hasServerSideEncryptionHeader(header http.Header) bool {
	return crypto.S3.IsRequested(header) || crypto.SSEC.IsRequested(header)
}

// isEncryptedMultipart returns true if the current object is
// uploaded by the user using multipart mechanism:
// initiate new multipart, upload part, complete upload
func isEncryptedMultipart(objInfo ObjectInfo) bool {
	if len(objInfo.Parts) == 0 {
		return false
	}
	if !crypto.IsMultiPart(objInfo.UserDefined) {
		return false
	}
	for _, part := range objInfo.Parts {
		_, err := sio.DecryptedSize(uint64(part.Size))
		if err != nil {
			return false
		}
	}
	// Further check if this object is uploaded using multipart mechanism
	// by the user and it is not about XL internally splitting the
	// object into parts in PutObject()
	return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32)
}

// ParseSSECopyCustomerRequest parses the SSE-C header fields of the provided request.
// It returns the client provided key on success.
func ParseSSECopyCustomerRequest(h http.Header, metadata map[string]string) (key []byte, err error) {
	if !globalIsSSL { // minio only supports HTTP or HTTPS requests not both at the same time
		// we cannot use r.TLS == nil here because Go's http implementation reflects on
		// the net.Conn and sets the TLS field of http.Request only if it's an tls.Conn.
		// Minio uses a BufConn (wrapping a tls.Conn) so the type check within the http package
		// will always fail -> r.TLS is always nil even for TLS requests.
		return nil, errInsecureSSERequest
	}
	if crypto.S3.IsEncrypted(metadata) && crypto.SSECopy.IsRequested(h) {
		return nil, crypto.ErrIncompatibleEncryptionMethod
	}
@@ -106,13 +122,6 @@ func ParseSSECustomerRequest(r *http.Request) (key []byte, err error) {
// ParseSSECustomerHeader parses the SSE-C header fields and returns
// the client provided key on success.
func ParseSSECustomerHeader(header http.Header) (key []byte, err error) {
	if !globalIsSSL { // minio only supports HTTP or HTTPS requests not both at the same time
		// we cannot use r.TLS == nil here because Go's http implementation reflects on
		// the net.Conn and sets the TLS field of http.Request only if it's an tls.Conn.
		// Minio uses a BufConn (wrapping a tls.Conn) so the type check within the http package
		// will always fail -> r.TLS is always nil even for TLS requests.
		return nil, errInsecureSSERequest
	}
	if crypto.S3.IsRequested(header) && crypto.SSEC.IsRequested(header) {
		return key, crypto.ErrIncompatibleEncryptionMethod
	}
@@ -149,16 +158,40 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
		sealedKey = objectKey.Seal(extKey, sealedKey.IV, crypto.SSEC.String(), bucket, object)
		crypto.SSEC.CreateMetadata(metadata, sealedKey)
		return nil
	case crypto.S3.IsEncrypted(metadata):
		if GlobalKMS == nil {
			return errKMSNotConfigured
		}
		keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
		if err != nil {
			return err
		}
		oldKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
		if err != nil {
			return err
		}
		var objectKey crypto.ObjectKey
		if err = objectKey.Unseal(oldKey, sealedKey, crypto.S3.String(), bucket, object); err != nil {
			return err
		}

		newKey, encKey, err := GlobalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
		if err != nil {
			return err
		}
		sealedKey = objectKey.Seal(newKey, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
		crypto.S3.CreateMetadata(metadata, globalKMSKeyID, encKey, sealedKey)
		return nil
	}
}

func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]string, sseS3 bool) ([]byte, error) {
	var sealedKey crypto.SealedKey
	if sseS3 {
		if globalKMS == nil {
		if GlobalKMS == nil {
			return nil, errKMSNotConfigured
		}
		key, encKey, err := globalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
		key, encKey, err := GlobalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
		if err != nil {
			return nil, err
		}
@@ -174,21 +207,20 @@ func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]s
	sealedKey = objectKey.Seal(extKey, crypto.GenerateIV(rand.Reader), crypto.SSEC.String(), bucket, object)
	crypto.SSEC.CreateMetadata(metadata, sealedKey)
	return objectKey[:], nil

}

func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (io.Reader, error) {
func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (r io.Reader, encKey []byte, err error) {
	objectEncryptionKey, err := newEncryptMetadata(key, bucket, object, metadata, sseS3)
	if err != nil {
		return nil, err
		return nil, encKey, err
	}

	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
	if err != nil {
		return nil, crypto.ErrInvalidCustomerKey
		return nil, encKey, crypto.ErrInvalidCustomerKey
	}

	return reader, nil
	return reader, objectEncryptionKey, nil
}

// set new encryption metadata from http request headers for SSE-C and generated key from KMS in the case of
@@ -210,19 +242,18 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
// EncryptRequest takes the client provided content and encrypts the data
// with the client provided key. It also marks the object as client-side-encrypted
// and sets the correct headers.
func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (io.Reader, error) {
func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (reader io.Reader, objEncKey []byte, err error) {

	var (
		key []byte
		err error
	)
	if crypto.S3.IsRequested(r.Header) && crypto.SSEC.IsRequested(r.Header) {
		return nil, crypto.ErrIncompatibleEncryptionMethod
		return nil, objEncKey, crypto.ErrIncompatibleEncryptionMethod
	}
	if crypto.SSEC.IsRequested(r.Header) {
		key, err = ParseSSECustomerRequest(r)
		if err != nil {
			return nil, err
			return nil, objEncKey, err
		}
	}
	return newEncryptReader(content, key, bucket, object, metadata, crypto.S3.IsRequested(r.Header))
@@ -249,7 +280,7 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
	default:
		return nil, errObjectTampered
	case crypto.S3.IsEncrypted(metadata):
		if globalKMS == nil {
		if GlobalKMS == nil {
			return nil, errKMSNotConfigured
		}
		keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
@@ -257,7 +288,7 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
		if err != nil {
			return nil, err
		}
		extKey, err := globalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
		extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
		if err != nil {
			return nil, err
		}
@@ -325,7 +356,7 @@ func DecryptRequestWithSequenceNumberR(client io.Reader, h http.Header, bucket,

// DecryptCopyRequestR - same as DecryptCopyRequest, but with a
// Reader
func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string, metadata map[string]string) (io.Reader, error) {
func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
	var (
		key []byte
		err error
@@ -336,7 +367,7 @@ func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string,
			return nil, err
		}
	}
	return newDecryptReader(client, key, bucket, object, 0, metadata)
	return newDecryptReader(client, key, bucket, object, seqNumber, metadata)
}

func newDecryptReader(client io.Reader, key []byte, bucket, object string, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
@@ -358,17 +389,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
	return reader, nil
}

// GetEncryptedOffsetLength - returns encrypted offset and length
// along with sequence number
func GetEncryptedOffsetLength(startOffset, length int64, objInfo ObjectInfo) (seqNumber uint32, encStartOffset, encLength int64) {
	if len(objInfo.Parts) == 0 || !crypto.IsMultiPart(objInfo.UserDefined) {
		seqNumber, encStartOffset, encLength = getEncryptedSinglePartOffsetLength(startOffset, length, objInfo)
		return
	}
	seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
	return
}

// DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
// reader
func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
@@ -376,13 +396,12 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
	io.Reader, error) {

	bucket, object := oi.Bucket, oi.Name

	// Single part case
	if len(oi.Parts) == 0 || !crypto.IsMultiPart(oi.UserDefined) {
	if !isEncryptedMultipart(oi) {
		var reader io.Reader
		var err error
		if copySource {
			reader, err = DecryptCopyRequestR(inputReader, h, bucket, object, oi.UserDefined)
			reader, err = DecryptCopyRequestR(inputReader, h, bucket, object, seqNumber, oi.UserDefined)
		} else {
			reader, err = DecryptRequestWithSequenceNumberR(inputReader, h, bucket, object, seqNumber, oi.UserDefined)
		}
@@ -392,8 +411,8 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
		return reader, nil
	}

	partDecRelOffset := int64(seqNumber) * sseDAREPackageBlockSize
	partEncRelOffset := int64(seqNumber) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize)
	partDecRelOffset := int64(seqNumber) * SSEDAREPackageBlockSize
	partEncRelOffset := int64(seqNumber) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)

	w := &DecryptBlocksReader{
		reader: inputReader,
@@ -458,7 +477,7 @@ type DecryptBlocksReader struct {
	// Current part index
	partIndex int
	// Parts information
	parts []objectPartInfo
	parts []ObjectPartInfo
	header http.Header
	bucket, object string
	metadata map[string]string
@@ -558,7 +577,6 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) {

		d.partDecRelOffset += int64(n1)
	}

	return len(p), nil
}
@@ -574,7 +592,7 @@ type DecryptBlocksWriter struct {
	// Current part index
	partIndex int
	// Parts information
	parts []objectPartInfo
	parts []ObjectPartInfo
	req *http.Request
	bucket, object string
	metadata map[string]string
@@ -708,7 +726,7 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
	var seqNumber uint32
	var encStartOffset, encLength int64

	if len(objInfo.Parts) == 0 || !crypto.IsMultiPart(objInfo.UserDefined) {
	if !isEncryptedMultipart(objInfo) {
		seqNumber, encStartOffset, encLength = getEncryptedSinglePartOffsetLength(startOffset, length, objInfo)

		var writer io.WriteCloser
@@ -724,7 +742,8 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
		return writer, encStartOffset, encLength, nil
	}

	seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
	_, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)

	var partStartIndex int
	var partStartOffset = startOffset
	// Skip parts until final offset maps to a particular part offset.
@@ -747,8 +766,8 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
		partStartOffset -= int64(decryptedSize)
	}

	startSeqNum := partStartOffset / sseDAREPackageBlockSize
	partEncRelOffset := int64(startSeqNum) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize)
	startSeqNum := partStartOffset / SSEDAREPackageBlockSize
	partEncRelOffset := int64(startSeqNum) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)

	w := &DecryptBlocksWriter{
		writer: client,
@@ -793,7 +812,6 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri

// getEncryptedMultipartsOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
func getEncryptedMultipartsOffsetLength(offset, length int64, obj ObjectInfo) (uint32, int64, int64) {

	// Calculate encrypted offset of a multipart object
	computeEncOffset := func(off int64, obj ObjectInfo) (seqNumber uint32, encryptedOffset int64, err error) {
		var curPartEndOffset uint64
@@ -840,9 +858,9 @@ func getEncryptedMultipartsOffsetLength(offset, length int64, obj ObjectInfo) (u

// getEncryptedSinglePartOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo) (seqNumber uint32, encOffset int64, encLength int64) {
	onePkgSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
	onePkgSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)

	seqNumber = uint32(offset / sseDAREPackageBlockSize)
	seqNumber = uint32(offset / SSEDAREPackageBlockSize)
	encOffset = int64(seqNumber) * onePkgSize
	// The math to compute the encrypted length is always
	// originalLength i.e (offset+length-1) to be divided under
@@ -850,10 +868,10 @@ func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo
	// block. This is then multiplied by final package size which
	// is basically 64KiB + 32. Finally negate the encrypted offset
	// to get the final encrypted length on disk.
	encLength = ((offset+length)/sseDAREPackageBlockSize)*onePkgSize - encOffset
	encLength = ((offset+length)/SSEDAREPackageBlockSize)*onePkgSize - encOffset

	// Check for the remainder, to figure if we need one extract package to read from.
	if (offset+length)%sseDAREPackageBlockSize > 0 {
	if (offset+length)%SSEDAREPackageBlockSize > 0 {
		encLength += onePkgSize
	}

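Worked example for the single-part math above (illustrative only, not part of the changeset): a decrypted range starting at offset 100000 with length 1000 lies entirely inside DARE package number 1, so exactly one encrypted package is read.

// seqNumber = 100000 / 65536               = 1
// encOffset = 1 * (65536 + 32)             = 65568
// encLength = (101000/65536)*65568 - 65568 = 0, plus one package for the
//             remainder (101000 % 65536 != 0), giving 65568
seq, encOff, encLen := getEncryptedSinglePartOffsetLength(100000, 1000, ObjectInfo{})
_ = seq    // 1
_ = encOff // 65568
_ = encLen // 65568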
@@ -870,7 +888,7 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
	if !crypto.IsEncrypted(o.UserDefined) {
		return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
	}
	if len(o.Parts) == 0 || !crypto.IsMultiPart(o.UserDefined) {
	if !isEncryptedMultipart(*o) {
		size, err := sio.DecryptedSize(uint64(o.Size))
		if err != nil {
			err = errObjectTampered // assign correct error type
@@ -889,6 +907,64 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
	return size, nil
}

// For encrypted objects, the ETag sent by client if available
// is stored in encrypted form in the backend. Decrypt the ETag
// if ETag was previously encrypted.
func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool) (decryptedETag string) {
	var (
		key [32]byte
		err error
	)
	// If ETag is contentMD5Sum return it as is.
	if len(objInfo.ETag) == 32 {
		return objInfo.ETag
	}

	if crypto.IsMultiPart(objInfo.UserDefined) {
		return objInfo.ETag
	}
	if crypto.SSECopy.IsRequested(headers) {
		key, err = crypto.SSECopy.ParseHTTP(headers)
		if err != nil {
			return objInfo.ETag
		}
	}
	// As per AWS S3 Spec, ETag for SSE-C encrypted objects need not be MD5Sum of the data.
	// Since server side copy with same source and dest just replaces the ETag, we save
	// encrypted content MD5Sum as ETag for both SSE-C and SSE-S3, we standardize the ETag
	//encryption across SSE-C and SSE-S3, and only return last 32 bytes for SSE-C
	if crypto.SSEC.IsEncrypted(objInfo.UserDefined) && !copySource {
		return objInfo.ETag[len(objInfo.ETag)-32:]
	}

	objectEncryptionKey, err := decryptObjectInfo(key[:], objInfo.Bucket, objInfo.Name, objInfo.UserDefined)
	if err != nil {
		return objInfo.ETag
	}
	return tryDecryptETag(objectEncryptionKey, objInfo.ETag, false)
}

// helper to decrypt Etag given object encryption key and encrypted ETag
func tryDecryptETag(key []byte, encryptedETag string, ssec bool) string {
	// ETag for SSE-C encrypted objects need not be content MD5Sum.While encrypted
	// md5sum is stored internally, return just the last 32 bytes of hex-encoded and
	// encrypted md5sum string for SSE-C
	if ssec {
		return encryptedETag[len(encryptedETag)-32:]
	}
	var objectKey crypto.ObjectKey
	copy(objectKey[:], key)
	encBytes, err := hex.DecodeString(encryptedETag)
	if err != nil {
		return encryptedETag
	}
	etagBytes, err := objectKey.UnsealETag(encBytes)
	if err != nil {
		return encryptedETag
	}
	return hex.EncodeToString(etagBytes)
}

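Hypothetical usage sketch of the two helpers above (oi stands for an assumed ObjectInfo; this is not part of the changeset): a stored, encrypted ETag is unsealed with the object encryption key, while SSE-C responses fall back to the trailing 32 hex characters.

var clientKey [32]byte // zero unless an SSE-C copy request supplied a real key
if objectKey, err := decryptObjectInfo(clientKey[:], oi.Bucket, oi.Name, oi.UserDefined); err == nil {
	oi.ETag = tryDecryptETag(objectKey, oi.ETag, crypto.SSEC.IsEncrypted(oi.UserDefined))
}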
// GetDecryptedRange - To decrypt the range (off, length) of the
|
||||
// decrypted object stream, we need to read the range (encOff,
|
||||
// encLength) of the encrypted object stream to decrypt it, and
|
||||
@@ -910,26 +986,29 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
|
||||
}
|
||||
|
||||
// Assemble slice of (decrypted) part sizes in `sizes`
|
||||
var sizes []int64
|
||||
var decObjSize int64 // decrypted total object size
|
||||
var partSize uint64
|
||||
partSize, err = sio.DecryptedSize(uint64(o.Size))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
sizes := []int64{int64(partSize)}
|
||||
decObjSize = sizes[0]
|
||||
if crypto.IsMultiPart(o.UserDefined) {
|
||||
if isEncryptedMultipart(*o) {
|
||||
sizes = make([]int64, len(o.Parts))
|
||||
decObjSize = 0
|
||||
for i, part := range o.Parts {
|
||||
var partSize uint64
|
||||
partSize, err = sio.DecryptedSize(uint64(part.Size))
|
||||
if err != nil {
|
||||
err = errObjectTampered
|
||||
return
|
||||
}
|
||||
t := int64(partSize)
|
||||
sizes[i] = t
|
||||
decObjSize += t
|
||||
sizes[i] = int64(partSize)
|
||||
decObjSize += int64(partSize)
|
||||
}
|
||||
} else {
|
||||
var partSize uint64
|
||||
partSize, err = sio.DecryptedSize(uint64(o.Size))
|
||||
if err != nil {
|
||||
err = errObjectTampered
|
||||
return
|
||||
}
|
||||
sizes = []int64{int64(partSize)}
|
||||
decObjSize = sizes[0]
|
||||
}
|
||||
|
||||
var off, length int64
|
||||
@@ -963,11 +1042,11 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
|
||||
// partStart is always found in the loop above,
|
||||
// because off is validated.
|
||||
|
||||
sseDAREEncPackageBlockSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
|
||||
startPkgNum := (off - cumulativeSum) / sseDAREPackageBlockSize
|
||||
sseDAREEncPackageBlockSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
|
||||
startPkgNum := (off - cumulativeSum) / SSEDAREPackageBlockSize
|
||||
|
||||
// Now we can calculate the number of bytes to skip
|
||||
skipLen = (off - cumulativeSum) % sseDAREPackageBlockSize
|
||||
skipLen = (off - cumulativeSum) % SSEDAREPackageBlockSize
|
||||
|
||||
encOff = encCumulativeSum + startPkgNum*sseDAREEncPackageBlockSize
|
||||
// Locate the part containing the end of the required range
|
||||
@@ -984,7 +1063,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
|
||||
}
|
||||
// partEnd is always found in the loop above, because off and
|
||||
// length are validated.
|
||||
endPkgNum := (endOffset - cumulativeSum) / sseDAREPackageBlockSize
|
||||
endPkgNum := (endOffset - cumulativeSum) / SSEDAREPackageBlockSize
|
||||
// Compute endEncOffset with one additional DARE package (so
|
||||
// we read the package containing the last desired byte).
|
||||
endEncOffset := encCumulativeSum + (endPkgNum+1)*sseDAREEncPackageBlockSize
|
||||
@@ -1024,22 +1103,22 @@ func (o *ObjectInfo) EncryptedSize() int64 {
|
||||
// decryption succeeded.
|
||||
//
|
||||
// DecryptCopyObjectInfo also returns whether the object is encrypted or not.
|
||||
func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (apiErr APIErrorCode, encrypted bool) {
|
||||
func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIErrorCode, encrypted bool) {
|
||||
// Directories are never encrypted.
|
||||
if info.IsDir {
|
||||
return ErrNone, false
|
||||
}
|
||||
if apiErr, encrypted = ErrNone, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSECopy.IsRequested(headers) {
|
||||
apiErr = ErrInvalidEncryptionParameters
|
||||
if errCode, encrypted = ErrNone, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSECopy.IsRequested(headers) {
|
||||
errCode = ErrInvalidEncryptionParameters
|
||||
} else if encrypted {
|
||||
if (!crypto.SSECopy.IsRequested(headers) && crypto.SSEC.IsEncrypted(info.UserDefined)) ||
|
||||
(crypto.SSECopy.IsRequested(headers) && crypto.S3.IsEncrypted(info.UserDefined)) {
|
||||
apiErr = ErrSSEEncryptedObject
|
||||
errCode = ErrSSEEncryptedObject
|
||||
return
|
||||
}
|
||||
var err error
|
||||
if info.Size, err = info.DecryptedSize(); err != nil {
|
||||
apiErr = toAPIErrorCode(err)
|
||||
errCode = toAPIErrorCode(context.Background(), err)
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -1052,7 +1131,7 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (apiErr APIErr
|
||||
// decryption succeeded.
|
||||
//
|
||||
// DecryptObjectInfo also returns whether the object is encrypted or not.
|
||||
func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, err error) {
|
||||
func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, err error) {
|
||||
// Directories are never encrypted.
|
||||
if info.IsDir {
|
||||
return false, nil
|
||||
@@ -1071,6 +1150,123 @@ func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, er
|
||||
return
|
||||
}
|
||||
_, err = info.DecryptedSize()
|
||||
|
||||
if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
|
||||
info.ETag = getDecryptedETag(headers, *info, false)
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// The customer key in the header is used by the gateway for encryption in the case of
// s3 gateway double encryption. A new client key is derived from the customer provided
// key to be sent to the s3 backend for encryption at the backend.
func deriveClientKey(clientKey [32]byte, bucket, object string) [32]byte {
	var key [32]byte
	mac := hmac.New(sha256.New, clientKey[:])
	mac.Write([]byte(crypto.SSEC.String()))
	mac.Write([]byte(path.Join(bucket, object)))
	mac.Sum(key[:0])
	return key
}

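Illustrative property of deriveClientKey (added note, not part of the diff; clientKey stands for a caller-provided [32]byte SSE-C key): the derived key is a deterministic HMAC of the client key and the object path, so the gateway can re-derive the same backend SSE-C key on reads without having to store it.

k1 := deriveClientKey(clientKey, "bucket", "object")
k2 := deriveClientKey(clientKey, "bucket", "object")
// k1 == k2, while deriveClientKey(clientKey, "bucket", "other") yields a different key.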
// set encryption options for pass through to backend in the case of gateway and UserDefined metadata
|
||||
func getDefaultOpts(header http.Header, copySource bool, metadata map[string]string) (opts ObjectOptions, err error) {
|
||||
var clientKey [32]byte
|
||||
var sse encrypt.ServerSide
|
||||
|
||||
if copySource {
|
||||
if crypto.SSECopy.IsRequested(header) {
|
||||
clientKey, err = crypto.SSECopy.ParseHTTP(header)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
|
||||
return
|
||||
}
|
||||
return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse), UserDefined: metadata}, nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if crypto.SSEC.IsRequested(header) {
|
||||
clientKey, err = crypto.SSEC.ParseHTTP(header)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
|
||||
return
|
||||
}
|
||||
return ObjectOptions{ServerSideEncryption: sse, UserDefined: metadata}, nil
|
||||
}
|
||||
if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) {
|
||||
return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil
|
||||
}
|
||||
return ObjectOptions{UserDefined: metadata}, nil
|
||||
}
|
||||
|
||||
// get ObjectOptions for GET calls from encryption headers
|
||||
func getOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
|
||||
var (
|
||||
encryption encrypt.ServerSide
|
||||
opts ObjectOptions
|
||||
)
|
||||
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
|
||||
key, err := crypto.SSEC.ParseHTTP(r.Header)
|
||||
if err != nil {
|
||||
return opts, err
|
||||
}
|
||||
derivedKey := deriveClientKey(key, bucket, object)
|
||||
encryption, err = encrypt.NewSSEC(derivedKey[:])
|
||||
logger.CriticalIf(ctx, err)
|
||||
return ObjectOptions{ServerSideEncryption: encryption}, nil
|
||||
}
|
||||
// default case of passing encryption headers to backend
|
||||
return getDefaultOpts(r.Header, false, nil)
|
||||
}
|
||||
|
||||
// get ObjectOptions for PUT calls from encryption headers and metadata
|
||||
func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
|
||||
// In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it
|
||||
// is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls
|
||||
if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) {
|
||||
return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil
|
||||
}
|
||||
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
|
||||
opts, err = getOpts(ctx, r, bucket, object)
|
||||
opts.UserDefined = metadata
|
||||
return
|
||||
}
|
||||
// default case of passing encryption headers and UserDefined metadata to backend
|
||||
return getDefaultOpts(r.Header, false, metadata)
|
||||
}
|
||||
|
||||
// get ObjectOptions for Copy calls with encryption headers provided on the target side and source side metadata
|
||||
func copyDstOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
|
||||
return putOpts(ctx, r, bucket, object, metadata)
|
||||
}
|
||||
|
||||
// get ObjectOptions for Copy calls with encryption headers provided on the source side
|
||||
func copySrcOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
|
||||
var (
|
||||
ssec encrypt.ServerSide
|
||||
opts ObjectOptions
|
||||
)
|
||||
|
||||
if GlobalGatewaySSE.SSEC() && crypto.SSECopy.IsRequested(r.Header) {
|
||||
key, err := crypto.SSECopy.ParseHTTP(r.Header)
|
||||
if err != nil {
|
||||
return opts, err
|
||||
}
|
||||
derivedKey := deriveClientKey(key, bucket, object)
|
||||
ssec, err = encrypt.NewSSEC(derivedKey[:])
|
||||
if err != nil {
|
||||
return opts, err
|
||||
}
|
||||
return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(ssec)}, nil
|
||||
}
|
||||
|
||||
// default case of passing encryption headers to backend
|
||||
return getDefaultOpts(r.Header, true, nil)
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio-go/pkg/encrypt"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/sio"
|
||||
)
|
||||
@@ -38,7 +39,7 @@ var hasServerSideEncryptionHeaderTests = []struct {
|
||||
{headers: map[string]string{}, sseRequest: false}, // 4
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm + " ": "AES256", " " + crypto.SSECopyKey: "key", crypto.SSECopyKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm: "", crypto.SSECopyKey: "", crypto.SSECopyKeyMD5: ""}, sseRequest: false}, // 6
|
||||
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: true}, // 6
|
||||
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: true}, // 7
|
||||
}
|
||||
|
||||
func TestHasServerSideEncryptionHeader(t *testing.T) {
|
||||
@@ -90,12 +91,12 @@ var hasSSECustomerHeaderTests = []struct {
|
||||
{headers: map[string]string{crypto.SSECKeyMD5: "md5"}, sseRequest: true}, // 3
|
||||
{headers: map[string]string{}, sseRequest: false}, // 4
|
||||
{headers: map[string]string{crypto.SSECAlgorithm + " ": "AES256", " " + crypto.SSECKey: "key", crypto.SSECKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "", crypto.SSECKey: "", crypto.SSECKeyMD5: ""}, sseRequest: false}, // 6
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "", crypto.SSECKey: "", crypto.SSECKeyMD5: ""}, sseRequest: true}, // 6
|
||||
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: false}, // 7
|
||||
|
||||
}
|
||||
|
||||
func TesthasSSECustomerHeader(t *testing.T) {
|
||||
func TestHasSSECustomerHeader(t *testing.T) {
|
||||
for i, test := range hasSSECustomerHeaderTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
@@ -107,228 +108,6 @@ func TesthasSSECustomerHeader(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
var parseSSECustomerRequestTests = []struct {
|
||||
headers map[string]string
|
||||
useTLS bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
|
||||
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: true, err: nil,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 1
|
||||
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: false, err: errInsecureSSERequest,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES 256",
|
||||
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 2
|
||||
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrInvalidCustomerAlgorithm,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 3
|
||||
crypto.SSECKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: " jE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 4
|
||||
crypto.SSECKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrInvalidCustomerKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 5
|
||||
crypto.SSECKeyMD5: " +jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 6
|
||||
crypto.SSECKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrInvalidCustomerKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "", // 7
|
||||
crypto.SSECKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrMissingCustomerKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
|
||||
crypto.SSECKeyMD5: "",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrMissingCustomerKeyMD5,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
|
||||
crypto.SSECKeyMD5: "",
|
||||
crypto.SSEHeader: "",
|
||||
},
|
||||
useTLS: true, err: crypto.ErrIncompatibleEncryptionMethod,
|
||||
},
|
||||
}
|
||||
|
||||
func TestParseSSECustomerRequest(t *testing.T) {
|
||||
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
|
||||
for i, test := range parseSSECustomerRequestTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
request := &http.Request{}
|
||||
request.Header = headers
|
||||
globalIsSSL = test.useTLS
|
||||
|
||||
_, err := ParseSSECustomerRequest(request)
|
||||
if err != test.err {
|
||||
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var parseSSECopyCustomerRequestTests = []struct {
|
||||
headers map[string]string
|
||||
metadata map[string]string
|
||||
useTLS bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
|
||||
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: nil,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
|
||||
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": base64.StdEncoding.EncodeToString(make([]byte, 64))},
|
||||
useTLS: true, err: crypto.ErrIncompatibleEncryptionMethod,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 1
|
||||
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: false, err: errInsecureSSERequest,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES 256",
|
||||
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 2
|
||||
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrInvalidCustomerAlgorithm,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 3
|
||||
crypto.SSECopyKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: " jE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 4
|
||||
crypto.SSECopyKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrInvalidCustomerKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 5
|
||||
crypto.SSECopyKeyMD5: " +jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 6
|
||||
crypto.SSECopyKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrInvalidCustomerKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "", // 7
|
||||
crypto.SSECopyKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrMissingCustomerKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
crypto.SSECopyAlgorithm: "AES256",
|
||||
crypto.SSECopyKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
|
||||
crypto.SSECopyKeyMD5: "",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
useTLS: true, err: crypto.ErrMissingCustomerKeyMD5,
|
||||
},
|
||||
}
|
||||
|
||||
func TestParseSSECopyCustomerRequest(t *testing.T) {
|
||||
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
|
||||
for i, test := range parseSSECopyCustomerRequestTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
request := &http.Request{}
|
||||
request.Header = headers
|
||||
globalIsSSL = test.useTLS
|
||||
|
||||
_, err := ParseSSECopyCustomerRequest(request.Header, test.metadata)
|
||||
if err != test.err {
|
||||
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var encryptRequestTests = []struct {
|
||||
header map[string]string
|
||||
metadata map[string]string
|
||||
@@ -362,7 +141,7 @@ func TestEncryptRequest(t *testing.T) {
|
||||
for k, v := range test.header {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
_, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
|
||||
_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
|
||||
@@ -547,7 +326,7 @@ var decryptObjectInfoTests = []struct {
|
||||
|
||||
func TestDecryptObjectInfo(t *testing.T) {
|
||||
for i, test := range decryptObjectInfoTests {
|
||||
if encrypted, err := DecryptObjectInfo(test.info, test.headers); err != test.expErr {
|
||||
if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
|
||||
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
|
||||
} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
|
||||
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
|
||||
@@ -557,6 +336,66 @@ func TestDecryptObjectInfo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for issue reproduced when getting the right encrypted
|
||||
// offset of the object.
|
||||
func TestGetDecryptedRange_Issue50(t *testing.T) {
|
||||
rs, err := parseRequestRangeSpec("bytes=594870256-594870263")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
objInfo := ObjectInfo{
|
||||
Bucket: "bucket",
|
||||
Name: "object",
|
||||
Size: 595160760,
|
||||
UserDefined: map[string]string{
|
||||
crypto.SSEMultipart: "",
|
||||
crypto.SSEIV: "HTexa=",
|
||||
crypto.SSESealAlgorithm: "DAREv2-HMAC-SHA256",
|
||||
crypto.SSECSealedKey: "IAA8PGAA==",
|
||||
ReservedMetadataPrefix + "actual-size": "594870264",
|
||||
"content-type": "application/octet-stream",
|
||||
"etag": "166b1545b4c1535294ee0686678bea8c-2",
|
||||
},
|
||||
Parts: []ObjectPartInfo{
|
||||
{
|
||||
Number: 1,
|
||||
Name: "part.1",
|
||||
ETag: "etag1",
|
||||
Size: 297580380,
|
||||
ActualSize: 297435132,
|
||||
},
|
||||
{
|
||||
Number: 2,
|
||||
Name: "part.2",
|
||||
ETag: "etag2",
|
||||
Size: 297580380,
|
||||
ActualSize: 297435132,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
encOff, encLength, skipLen, seqNumber, partStart, err := objInfo.GetDecryptedRange(rs)
|
||||
if err != nil {
|
||||
t.Fatalf("Test: failed %s", err)
|
||||
}
|
||||
if encOff != 595127964 {
|
||||
t.Fatalf("Test: expected %d, got %d", 595127964, encOff)
|
||||
}
|
||||
if encLength != 32796 {
|
||||
t.Fatalf("Test: expected %d, got %d", 32796, encLength)
|
||||
}
|
||||
if skipLen != 32756 {
|
||||
t.Fatalf("Test: expected %d, got %d", 32756, skipLen)
|
||||
}
|
||||
if seqNumber != 4538 {
|
||||
t.Fatalf("Test: expected %d, got %d", 4538, seqNumber)
|
||||
}
|
||||
if partStart != 1 {
|
||||
t.Fatalf("Test: expected %d, got %d", 1, partStart)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDecryptedRange(t *testing.T) {
|
||||
var (
|
||||
pkgSz = int64(64) * humanize.KiByte
|
||||
@@ -666,7 +505,7 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
var (
|
||||
// make a multipart object-info given part sizes
|
||||
mkMPObj = func(sizes []int64) ObjectInfo {
|
||||
r := make([]objectPartInfo, len(sizes))
|
||||
r := make([]ObjectPartInfo, len(sizes))
|
||||
sum := int64(0)
|
||||
for i, s := range sizes {
|
||||
r[i].Number = i
|
||||
@@ -838,3 +677,84 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
var getDefaultOptsTests = []struct {
|
||||
headers http.Header
|
||||
copySource bool
|
||||
metadata map[string]string
|
||||
encryptionType encrypt.Type
|
||||
err error
|
||||
}{
|
||||
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
|
||||
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: encrypt.SSEC,
|
||||
err: nil}, // 0
|
||||
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
|
||||
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
copySource: true,
|
||||
metadata: nil,
|
||||
encryptionType: "",
|
||||
err: nil}, // 1
|
||||
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
|
||||
crypto.SSECKey: []string{"Mz"},
|
||||
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: "",
|
||||
err: crypto.ErrInvalidCustomerKey}, // 2
|
||||
{headers: http.Header{crypto.SSEHeader: []string{"AES256"}},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: encrypt.S3,
|
||||
err: nil}, // 3
|
||||
{headers: http.Header{},
|
||||
copySource: false,
|
||||
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
|
||||
crypto.S3KMSKeyID: "kms-key",
|
||||
crypto.S3KMSSealedKey: "m-key"},
|
||||
encryptionType: encrypt.S3,
|
||||
err: nil}, // 4
|
||||
{headers: http.Header{},
|
||||
copySource: true,
|
||||
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
|
||||
crypto.S3KMSKeyID: "kms-key",
|
||||
crypto.S3KMSSealedKey: "m-key"},
|
||||
encryptionType: "",
|
||||
err: nil}, // 5
|
||||
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
|
||||
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
copySource: true,
|
||||
metadata: nil,
|
||||
encryptionType: encrypt.SSEC,
|
||||
err: nil}, // 6
|
||||
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
|
||||
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: "",
|
||||
err: nil}, // 7
|
||||
}
|
||||
|
||||
func TestGetDefaultOpts(t *testing.T) {
|
||||
for i, test := range getDefaultOptsTests {
|
||||
opts, err := getDefaultOpts(test.headers, test.copySource, test.metadata)
|
||||
if test.err != err {
|
||||
t.Errorf("Case %d: expected err: %v , actual err: %v", i, test.err, err)
|
||||
}
|
||||
if err == nil {
|
||||
if opts.ServerSideEncryption == nil && test.encryptionType != "" {
|
||||
t.Errorf("Case %d: expected opts to be of %v encryption type", i, test.encryptionType)
|
||||
|
||||
}
|
||||
if opts.ServerSideEncryption != nil && test.encryptionType != opts.ServerSideEncryption.Type() {
|
||||
t.Errorf("Case %d: expected opts to have encryption type %v but was %v ", i, test.encryptionType, opts.ServerSideEncryption.Type())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -226,6 +226,17 @@ func TestGetSetIndexes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func getHexSequences(start int, number int, paddinglen int) (seq []string) {
	for i := start; i <= number; i++ {
		if paddinglen == 0 {
			seq = append(seq, fmt.Sprintf("%x", i))
		} else {
			seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dx", paddinglen), i))
		}
	}
	return seq
}

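For reference (added note, not part of the changeset), the new helper expands hexadecimal ranges for the IPv6 ellipses test cases further down in this diff, for example:

getHexSequences(1, 10, 0) // "1", "2", ..., "9", "a"          for {1...a}
getHexSequences(1, 10, 3) // "001", "002", ..., "009", "00a"  for {001...00a}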
func getSequences(start int, number int, paddinglen int) (seq []string) {
|
||||
for i := start; i <= number; i++ {
|
||||
if paddinglen == 0 {
|
||||
@@ -287,9 +298,9 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"/export/set",
|
||||
"",
|
||||
getSequences(1, 64, 0),
|
||||
Prefix: "/export/set",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 64, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -305,14 +316,14 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
getSequences(1, 64, 0),
|
||||
Prefix: "",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 64, 0),
|
||||
},
|
||||
{
|
||||
"http://minio",
|
||||
"/export/set",
|
||||
getSequences(2, 3, 0),
|
||||
Prefix: "http://minio",
|
||||
Suffix: "/export/set",
|
||||
Seq: getSequences(2, 3, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -328,9 +339,9 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"http://minio",
|
||||
".mydomain.net/data",
|
||||
getSequences(1, 64, 0),
|
||||
Prefix: "http://minio",
|
||||
Suffix: ".mydomain.net/data",
|
||||
Seq: getSequences(1, 64, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -345,14 +356,14 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"",
|
||||
"/data",
|
||||
getSequences(1, 16, 0),
|
||||
Prefix: "",
|
||||
Suffix: "/data",
|
||||
Seq: getSequences(1, 16, 0),
|
||||
},
|
||||
{
|
||||
"http://rack",
|
||||
".mydomain.minio",
|
||||
getSequences(1, 4, 0),
|
||||
Prefix: "http://rack",
|
||||
Suffix: ".mydomain.minio",
|
||||
Seq: getSequences(1, 4, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -368,14 +379,14 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
getSequences(0, 1, 0),
|
||||
Prefix: "",
|
||||
Suffix: "",
|
||||
Seq: getSequences(0, 1, 0),
|
||||
},
|
||||
{
|
||||
"http://minio",
|
||||
".mydomain.net/data",
|
||||
getSequences(0, 15, 0),
|
||||
Prefix: "http://minio",
|
||||
Suffix: ".mydomain.net/data",
|
||||
Seq: getSequences(0, 15, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -391,9 +402,9 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"http://server1/data",
|
||||
"",
|
||||
getSequences(1, 32, 0),
|
||||
Prefix: "http://server1/data",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 32, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -409,9 +420,9 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"http://server1/data",
|
||||
"",
|
||||
getSequences(1, 32, 2),
|
||||
Prefix: "http://server1/data",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 32, 2),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -427,19 +438,19 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
getSequences(1, 2, 0),
|
||||
Prefix: "",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 2, 0),
|
||||
},
|
||||
{
|
||||
"",
|
||||
"/test",
|
||||
getSequences(1, 64, 0),
|
||||
Prefix: "",
|
||||
Suffix: "/test",
|
||||
Seq: getSequences(1, 64, 0),
|
||||
},
|
||||
{
|
||||
"http://minio",
|
||||
"/export/set",
|
||||
getSequences(2, 3, 0),
|
||||
Prefix: "http://minio",
|
||||
Suffix: "/export/set",
|
||||
Seq: getSequences(2, 3, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -456,14 +467,60 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
getSequences(1, 10, 0),
|
||||
Prefix: "",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 10, 0),
|
||||
},
|
||||
{
|
||||
"/export",
|
||||
"/disk",
|
||||
getSequences(1, 10, 0),
|
||||
Prefix: "/export",
|
||||
Suffix: "/disk",
|
||||
Seq: getSequences(1, 10, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
|
||||
},
|
||||
true,
|
||||
},
|
||||
// IPv6 ellipses with hexadecimal expansion
|
||||
{
|
||||
"http://[2001:3984:3989::{1...a}]/disk{1...10}",
|
||||
endpointSet{
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
Prefix: "",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 10, 0),
|
||||
},
|
||||
{
|
||||
Prefix: "http://[2001:3984:3989::",
|
||||
Suffix: "]/disk",
|
||||
Seq: getHexSequences(1, 10, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
|
||||
},
|
||||
true,
|
||||
},
|
||||
// IPv6 ellipses with hexadecimal expansion with 3 position numerics.
|
||||
{
|
||||
"http://[2001:3984:3989::{001...00a}]/disk{1...10}",
|
||||
endpointSet{
|
||||
[]ellipses.ArgPattern{
|
||||
[]ellipses.Pattern{
|
||||
{
|
||||
Prefix: "",
|
||||
Suffix: "",
|
||||
Seq: getSequences(1, 10, 0),
|
||||
},
|
||||
{
|
||||
Prefix: "http://[2001:3984:3989::",
|
||||
Suffix: "]/disk",
|
||||
Seq: getHexSequences(1, 10, 3),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
130 cmd/endpoint.go
@@ -29,6 +29,9 @@ import (
|
||||
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/cpu"
|
||||
"github.com/minio/minio/pkg/disk"
|
||||
"github.com/minio/minio/pkg/mem"
|
||||
"github.com/minio/minio/pkg/mountinfo"
|
||||
)
|
||||
|
||||
@@ -91,7 +94,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
|
||||
// - Scheme field must contain "http" or "https"
|
||||
// - All field should be empty except Host and Path.
|
||||
if !((u.Scheme == "http" || u.Scheme == "https") &&
|
||||
u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") {
|
||||
u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
|
||||
return ep, fmt.Errorf("invalid URL endpoint format")
|
||||
}
|
||||
|
||||
@@ -112,6 +115,9 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
|
||||
return ep, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
|
||||
}
|
||||
}
|
||||
if i := strings.Index(host, "%"); i > -1 {
|
||||
host = host[:i]
|
||||
}
|
||||
|
||||
if host == "" {
|
||||
return ep, fmt.Errorf("invalid URL endpoint format: empty host name")
|
||||
@@ -152,7 +158,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
|
||||
// Only check if the arg is an ip address and ask for scheme since its absent.
|
||||
// localhost, example.com, any FQDN cannot be disambiguated from a regular file path such as
|
||||
// /mnt/export1. So we go ahead and start the minio server in FS modes in these cases.
|
||||
if isHostIPv4(arg) {
|
||||
if isHostIP(arg) {
|
||||
return ep, fmt.Errorf("invalid URL endpoint format: missing scheme http or https")
|
||||
}
|
||||
u = &url.URL{Path: path.Clean(arg)}
|
||||
@@ -194,6 +200,78 @@ func (endpoints EndpointList) GetString(i int) string {
|
||||
return endpoints[i].String()
|
||||
}
|
||||
|
||||
// localEndpointsMemUsage - returns ServerMemUsageInfo for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func localEndpointsMemUsage(endpoints EndpointList) ServerMemUsageInfo {
|
||||
var memUsages []mem.Usage
|
||||
var historicUsages []mem.Usage
|
||||
scratchSpace := map[string]bool{}
|
||||
for _, endpoint := range endpoints {
|
||||
// Only proceed for local endpoints
|
||||
if endpoint.IsLocal {
|
||||
if _, ok := scratchSpace[endpoint.Host]; ok {
|
||||
continue
|
||||
}
|
||||
memUsages = append(memUsages, mem.GetUsage())
|
||||
historicUsages = append(historicUsages, mem.GetHistoricUsage())
|
||||
scratchSpace[endpoint.Host] = true
|
||||
}
|
||||
}
|
||||
return ServerMemUsageInfo{
|
||||
Addr: GetLocalPeer(endpoints),
|
||||
Usage: memUsages,
|
||||
HistoricUsage: historicUsages,
|
||||
}
|
||||
}
|
||||
|
||||
// localEndpointsCPULoad - returns ServerCPULoadInfo for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func localEndpointsCPULoad(endpoints EndpointList) ServerCPULoadInfo {
|
||||
var cpuLoads []cpu.Load
|
||||
var historicLoads []cpu.Load
|
||||
scratchSpace := map[string]bool{}
|
||||
for _, endpoint := range endpoints {
|
||||
// Only proceed for local endpoints
|
||||
if endpoint.IsLocal {
|
||||
if _, ok := scratchSpace[endpoint.Host]; ok {
|
||||
continue
|
||||
}
|
||||
cpuLoads = append(cpuLoads, cpu.GetLoad())
|
||||
historicLoads = append(historicLoads, cpu.GetHistoricLoad())
|
||||
scratchSpace[endpoint.Host] = true
|
||||
}
|
||||
}
|
||||
return ServerCPULoadInfo{
|
||||
Addr: GetLocalPeer(endpoints),
|
||||
Load: cpuLoads,
|
||||
HistoricLoad: historicLoads,
|
||||
}
|
||||
}
|
||||
|
||||
// localEndpointsDrivePerf - returns ServerDrivesPerfInfo for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func localEndpointsDrivePerf(endpoints EndpointList) ServerDrivesPerfInfo {
|
||||
var dps []disk.Performance
|
||||
for _, endpoint := range endpoints {
|
||||
// Only proceed for local endpoints
|
||||
if endpoint.IsLocal {
|
||||
if _, err := os.Stat(endpoint.Path); err != nil {
|
||||
// Since this drive is not available, add relevant details and proceed
|
||||
dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
|
||||
continue
|
||||
}
|
||||
dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()))
|
||||
dp.Path = endpoint.Path
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
}
|
||||
|
||||
return ServerDrivesPerfInfo{
|
||||
Addr: GetLocalPeer(endpoints),
|
||||
Perf: dps,
|
||||
}
|
||||
}
|
||||
|
||||
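The three local* helpers above share one pattern: walk the endpoint list, skip remote endpoints, and collect a single sample per distinct local host by remembering hosts already visited in a scratch map. A generic sketch of that de-duplication step; the type and values are illustrative, not from the patch:

package main

import "fmt"

type endpoint struct {
	Host    string
	IsLocal bool
}

func main() {
	endpoints := []endpoint{
		{Host: "10.0.0.1:9000", IsLocal: true},
		{Host: "10.0.0.1:9000", IsLocal: true},
		{Host: "10.0.0.2:9000", IsLocal: false},
	}
	seen := map[string]bool{} // plays the role of scratchSpace above
	for _, ep := range endpoints {
		if !ep.IsLocal || seen[ep.Host] {
			continue // remote endpoint, or this host was already sampled
		}
		seen[ep.Host] = true
		fmt.Println("collect usage sample for", ep.Host) // e.g. mem.GetUsage()
	}
}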
// NewEndpointList - returns new endpoint list based on input args.
|
||||
func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
|
||||
var endpointType EndpointType
|
||||
@@ -224,7 +302,6 @@ func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
|
||||
uniqueArgs.Add(arg)
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
@@ -341,7 +418,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
||||
if err != nil {
|
||||
host = endpoint.Host
|
||||
}
|
||||
hostIPSet, _ := getHostIP4(host)
|
||||
hostIPSet, _ := getHostIP(host)
|
||||
if IPSet, ok := pathIPMap[endpoint.Path]; ok {
|
||||
if !IPSet.Intersection(hostIPSet).IsEmpty() {
|
||||
return serverAddr, endpoints, setupType,
|
||||
@@ -411,12 +488,12 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
||||
host = localServerAddr
|
||||
}
|
||||
|
||||
ipList, err := getHostIP4(host)
|
||||
ipList, err := getHostIP(host)
|
||||
logger.FatalIf(err, "unexpected error when resolving host '%s'", host)
|
||||
|
||||
// Filter ipList for IPs that start with '127.'.
// Filter ipList for IPs that start with '127.' or '::1'.
|
||||
loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {
|
||||
return strings.HasPrefix(ip, "127.")
|
||||
return strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1")
|
||||
}, "")
|
||||
|
||||
// If loop back IP is found and ipList contains only loop back IPs, then error out.
|
||||
@@ -455,7 +532,12 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
||||
return serverAddr, endpoints, setupType, err
|
||||
}
|
||||
|
||||
updateDomainIPs(uniqueArgs)
|
||||
_, dok := os.LookupEnv("MINIO_DOMAIN")
|
||||
_, eok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
|
||||
_, iok := os.LookupEnv("MINIO_PUBLIC_IPS")
|
||||
if dok && eok && !iok {
|
||||
updateDomainIPs(uniqueArgs)
|
||||
}
|
||||
|
||||
setupType = DistXLSetupType
|
||||
return serverAddr, endpoints, setupType, nil
|
||||
@@ -480,10 +562,10 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) {
|
||||
// Local peer can be empty in FS or Erasure coded mode.
|
||||
// If so, return globalMinioHost + globalMinioPort value.
|
||||
if globalMinioHost != "" {
|
||||
return globalMinioHost + ":" + globalMinioPort
|
||||
return net.JoinHostPort(globalMinioHost, globalMinioPort)
|
||||
}
|
||||
|
||||
return "127.0.0.1:" + globalMinioPort
|
||||
return net.JoinHostPort("127.0.0.1", globalMinioPort)
|
||||
}
|
||||
return peerSet.ToSlice()[0]
|
||||
}
|
||||
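Replacing string concatenation with net.JoinHostPort matters once IPv6 peers are possible, because the port has to sit outside a bracketed address. A small illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("127.0.0.1", "9000")) // 127.0.0.1:9000
	fmt.Println(net.JoinHostPort("::1", "9000"))       // [::1]:9000, brackets added automatically
}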
@@ -509,21 +591,21 @@ func GetRemotePeers(endpoints EndpointList) []string {
|
||||
return peerSet.ToSlice()
|
||||
}
|
||||
|
||||
// In federated and distributed setup, update IP addresses of the hosts passed in command line
|
||||
// if MINIO_PUBLIC_IPS are not set manually
|
||||
func updateDomainIPs(endPoints set.StringSet) {
|
||||
_, dok := os.LookupEnv("MINIO_DOMAIN")
|
||||
_, eok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
|
||||
_, iok := os.LookupEnv("MINIO_PUBLIC_IPS")
|
||||
if dok && eok && !iok {
|
||||
globalDomainIPs = set.NewStringSet()
|
||||
for e := range endPoints {
|
||||
host, _, _ := net.SplitHostPort(e)
|
||||
ipList, _ := getHostIP4(host)
|
||||
remoteIPList := ipList.FuncMatch(func(ip string, matchString string) bool {
|
||||
return !strings.HasPrefix(ip, "127.")
|
||||
}, "")
|
||||
globalDomainIPs.Add(remoteIPList.ToSlice()[0])
|
||||
ipList := set.NewStringSet()
|
||||
for e := range endPoints {
|
||||
host, _, err := net.SplitHostPort(e)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "missing port in address") {
|
||||
host = e
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
IPs, _ := getHostIP(host)
|
||||
ipList = ipList.Union(IPs)
|
||||
}
|
||||
globalDomainIPs = ipList.FuncMatch(func(ip string, matchString string) bool {
|
||||
return !strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1")
|
||||
}, "")
|
||||
}
|
||||
|
||||
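The rewritten updateDomainIPs resolves every command-line host, unions the results, and only then filters out loopback addresses, instead of picking one IP per host. A hedged sketch of that resolve-then-filter flow; getHostIP and the set package are MinIO internals, so net.LookupHost and a plain map stand in for them here, and lookup errors are ignored for brevity:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	hosts := []string{"minio1.example.com:9000", "localhost"} // assumed sample arguments
	ips := map[string]bool{}
	for _, h := range hosts {
		host, _, err := net.SplitHostPort(h)
		if err != nil {
			host = h // no port in the argument, use it as-is
		}
		addrs, _ := net.LookupHost(host)
		for _, a := range addrs {
			ips[a] = true
		}
	}
	for ip := range ips {
		// drop obvious loopback addresses; the patch applies a similar FuncMatch filter
		if strings.HasPrefix(ip, "127.") || ip == "::1" {
			continue
		}
		fmt.Println("public IP:", ip)
	}
}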
@@ -304,27 +304,27 @@ func TestCreateEndpoints(t *testing.T) {
|
||||
}, DistXLSetupType, nil},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
for i, testCase := range testCases {
|
||||
serverAddr, endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
|
||||
|
||||
if err == nil {
|
||||
if testCase.expectedErr != nil {
|
||||
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
|
||||
t.Fatalf("Test (%d) error: expected = %v, got = <nil>", i+1, testCase.expectedErr)
|
||||
} else {
|
||||
if serverAddr != testCase.expectedServerAddr {
|
||||
t.Fatalf("serverAddr: expected = %v, got = %v", testCase.expectedServerAddr, serverAddr)
|
||||
t.Fatalf("Test (%d) serverAddr: expected = %v, got = %v", i+1, testCase.expectedServerAddr, serverAddr)
|
||||
}
|
||||
if !reflect.DeepEqual(endpoints, testCase.expectedEndpoints) {
|
||||
t.Fatalf("endpoints: expected = %v, got = %v", testCase.expectedEndpoints, endpoints)
|
||||
t.Fatalf("Test (%d) endpoints: expected = %v, got = %v", i+1, testCase.expectedEndpoints, endpoints)
|
||||
}
|
||||
if setupType != testCase.expectedSetupType {
|
||||
t.Fatalf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
|
||||
t.Fatalf("Test (%d) setupType: expected = %v, got = %v", i+1, testCase.expectedSetupType, setupType)
|
||||
}
|
||||
}
|
||||
} else if testCase.expectedErr == nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
t.Fatalf("Test (%d) error: expected = <nil>, got = %v", i+1, err)
|
||||
} else if err.Error() != testCase.expectedErr.Error() {
|
||||
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
|
||||
t.Fatalf("Test (%d) error: expected = %v, got = %v", i+1, testCase.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cmd/environment.go (new file, 182 added lines)
@@ -0,0 +1,182 @@
|
||||
// Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvKMSMasterKey is the environment variable used to specify
|
||||
// a KMS master key used to protect SSE-S3 per-object keys.
|
||||
// Valid values must be of the form: "KEY_ID:32_BYTE_HEX_VALUE".
|
||||
EnvKMSMasterKey = "MINIO_SSE_MASTER_KEY"
|
||||
|
||||
// EnvAutoEncryption is the environment variable used to en/disable
|
||||
// SSE-S3 auto-encryption. SSE-S3 auto-encryption, if enabled,
|
||||
// requires a valid KMS configuration and turns any non-SSE-C
|
||||
// request into an SSE-S3 request.
|
||||
// If present EnvAutoEncryption must be either "on" or "off".
|
||||
EnvAutoEncryption = "MINIO_SSE_AUTO_ENCRYPTION"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvVaultEndpoint is the environment variable used to specify
|
||||
// the vault HTTPS endpoint.
|
||||
EnvVaultEndpoint = "MINIO_SSE_VAULT_ENDPOINT"
|
||||
|
||||
// EnvVaultAuthType is the environment variable used to specify
|
||||
// the authentication type for vault.
|
||||
EnvVaultAuthType = "MINIO_SSE_VAULT_AUTH_TYPE"
|
||||
|
||||
// EnvVaultAppRoleID is the environment variable used to specify
|
||||
// the vault AppRole ID.
|
||||
EnvVaultAppRoleID = "MINIO_SSE_VAULT_APPROLE_ID"
|
||||
|
||||
// EnvVaultAppSecretID is the environment variable used to specify
|
||||
// the vault AppRole secret corresponding to the AppRole ID.
|
||||
EnvVaultAppSecretID = "MINIO_SSE_VAULT_APPROLE_SECRET"
|
||||
|
||||
// EnvVaultKeyVersion is the environment variable used to specify
|
||||
// the vault key version.
|
||||
EnvVaultKeyVersion = "MINIO_SSE_VAULT_KEY_VERSION"
|
||||
|
||||
// EnvVaultKeyName is the environment variable used to specify
|
||||
// the vault named key-ring. In the S3 context it's referred to as
|
||||
// customer master key ID (CMK-ID).
|
||||
EnvVaultKeyName = "MINIO_SSE_VAULT_KEY_NAME"
|
||||
|
||||
// EnvVaultCAPath is the environment variable used to specify the
|
||||
// path to a directory of PEM-encoded CA cert files. These CA cert
|
||||
// files are used to authenticate Minio to Vault over mTLS.
|
||||
EnvVaultCAPath = "MINIO_SSE_VAULT_CAPATH"
|
||||
|
||||
// EnvVaultNamespace is the environment variable used to specify
|
||||
// vault namespace. The vault namespace is used if the enterprise
|
||||
// version of Hashicorp Vault is used.
|
||||
EnvVaultNamespace = "MINIO_SSE_VAULT_NAMESPACE"
|
||||
)
|
||||
|
||||
// Environment provides functions for accessing environment
|
||||
// variables.
|
||||
var Environment = environment{}
|
||||
|
||||
type environment struct{}
|
||||
|
||||
// Get retrieves the value of the environment variable named
|
||||
// by the key. If the variable is present in the environment the
|
||||
// value (which may be empty) is returned. Otherwise it returns
|
||||
// the specified default value.
|
||||
func (environment) Get(key, defaultValue string) string {
|
||||
if v, ok := os.LookupEnv(key); ok {
|
||||
return v
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
// Lookup retrieves the value of the environment variable named
|
||||
// by the key. If the variable is present in the environment the
|
||||
// value (which may be empty) is returned and the boolean is true.
|
||||
// Otherwise the returned value will be empty and the boolean will
|
||||
// be false.
|
||||
func (environment) Lookup(key string) (string, bool) { return os.LookupEnv(key) }
|
||||
|
||||
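Get and Lookup are thin wrappers over os.LookupEnv; the value of Get is that callers can express an environment-first, config-file-fallback lookup in one line. A usage sketch with an illustrative default:

package main

import (
	"fmt"
	"os"
)

// get mirrors environment.Get: return the variable if set, otherwise the default.
func get(key, defaultValue string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return defaultValue
}

func main() {
	os.Setenv("MINIO_SSE_VAULT_ENDPOINT", "https://vault.example.com:8200")
	// The environment value wins over the value loaded from the config file.
	fmt.Println(get("MINIO_SSE_VAULT_ENDPOINT", "https://127.0.0.1:8200"))
}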
// LookupKMSConfig extracts the KMS configuration provided by environment
// variables and merges it with the provided KMS configuration. The
// merging follows these rules:
//
// 1. A valid value provided as an environment variable takes precedence
// over the provided configuration and overwrites the value from the
// configuration file.
//
// 2. A value specified as an environment variable never changes the configuration
// file. So it is never made a persistent setting.
|
||||
//
|
||||
// It sets the global KMS configuration according to the merged configuration
|
||||
// on success.
|
||||
func (env environment) LookupKMSConfig(config crypto.KMSConfig) (err error) {
|
||||
// Lookup Hashicorp-Vault configuration & overwrite config entry if ENV var is present
|
||||
config.Vault.Endpoint = env.Get(EnvVaultEndpoint, config.Vault.Endpoint)
|
||||
config.Vault.CAPath = env.Get(EnvVaultCAPath, config.Vault.CAPath)
|
||||
config.Vault.Auth.Type = env.Get(EnvVaultAuthType, config.Vault.Auth.Type)
|
||||
config.Vault.Auth.AppRole.ID = env.Get(EnvVaultAppRoleID, config.Vault.Auth.AppRole.ID)
|
||||
config.Vault.Auth.AppRole.Secret = env.Get(EnvVaultAppSecretID, config.Vault.Auth.AppRole.Secret)
|
||||
config.Vault.Key.Name = env.Get(EnvVaultKeyName, config.Vault.Key.Name)
|
||||
config.Vault.Namespace = env.Get(EnvVaultNamespace, config.Vault.Namespace)
|
||||
keyVersion := env.Get(EnvVaultKeyVersion, strconv.Itoa(config.Vault.Key.Version))
|
||||
config.Vault.Key.Version, err = strconv.Atoi(keyVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid ENV variable: Unable to parse %s value (`%s`)", EnvVaultKeyVersion, keyVersion)
|
||||
}
|
||||
if err = config.Vault.Verify(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Lookup KMS master keys - only available through ENV.
|
||||
if masterKey, ok := env.Lookup(EnvKMSMasterKey); ok {
|
||||
if !config.Vault.IsEmpty() { // Vault and KMS master key provided
|
||||
return errors.New("Ambiguous KMS configuration: vault configuration and a master key are provided at the same time")
|
||||
}
|
||||
globalKMSKeyID, GlobalKMS, err = parseKMSMasterKey(masterKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !config.Vault.IsEmpty() {
|
||||
GlobalKMS, err = crypto.NewVault(config.Vault)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
globalKMSKeyID = config.Vault.Key.Name
|
||||
}
|
||||
|
||||
autoEncryption, err := ParseBoolFlag(env.Get(EnvAutoEncryption, "off"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
globalAutoEncryption = bool(autoEncryption)
|
||||
if globalAutoEncryption && GlobalKMS == nil { // auto-encryption enabled but no KMS
|
||||
return errors.New("Invalid KMS configuration: auto-encryption is enabled but no valid KMS configuration is present")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseKMSMasterKey parses the value of the environment variable
|
||||
// `EnvKMSMasterKey` and returns a key-ID and a master-key KMS on success.
|
||||
func parseKMSMasterKey(envArg string) (string, crypto.KMS, error) {
|
||||
values := strings.SplitN(envArg, ":", 2)
|
||||
if len(values) != 2 {
|
||||
return "", nil, fmt.Errorf("Invalid KMS master key: %s does not contain a ':'", envArg)
|
||||
}
|
||||
var (
|
||||
keyID = values[0]
|
||||
hexKey = values[1]
|
||||
)
|
||||
if len(hexKey) != 64 { // 2 hex characters = 1 byte
|
||||
return "", nil, fmt.Errorf("Invalid KMS master key: %s not a 32 bytes long HEX value", hexKey)
|
||||
}
|
||||
var masterKey [32]byte
|
||||
if _, err := hex.Decode(masterKey[:], []byte(hexKey)); err != nil {
|
||||
return "", nil, fmt.Errorf("Invalid KMS master key: %s not a 32 bytes long HEX value", hexKey)
|
||||
}
|
||||
return keyID, crypto.NewKMS(masterKey), nil
|
||||
}
|
||||
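parseKMSMasterKey expects MINIO_SSE_MASTER_KEY to look like "<key-id>:<64 hex characters>", i.e. a key name plus a 32-byte key. A minimal sketch of the same split-and-decode; the sample key below is a placeholder and must never be used for real data:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	envArg := "my-minio-key:" + strings.Repeat("0", 64) // 64 hex chars decode to 32 key bytes
	parts := strings.SplitN(envArg, ":", 2)
	keyID, hexKey := parts[0], parts[1]

	var masterKey [32]byte
	if len(hexKey) != 64 {
		panic("MINIO_SSE_MASTER_KEY must be KEY_ID:64-hex-characters")
	}
	if _, err := hex.Decode(masterKey[:], []byte(hexKey)); err != nil {
		panic(err)
	}
	fmt.Println("key id:", keyID, "key bytes:", len(masterKey))
}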
@@ -19,29 +19,30 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// Reads in parallel from bitrotReaders.
|
||||
// Reads in parallel from readers.
|
||||
type parallelReader struct {
|
||||
readers []*bitrotReader
|
||||
readers []io.ReaderAt
|
||||
dataBlocks int
|
||||
offset int64
|
||||
shardSize int64
|
||||
shardFileSize int64
|
||||
buf [][]byte
|
||||
}
|
||||
|
||||
// newParallelReader returns parallelReader.
|
||||
func newParallelReader(readers []*bitrotReader, dataBlocks int, offset int64, fileSize int64, blocksize int64) *parallelReader {
|
||||
shardSize := ceilFrac(blocksize, int64(dataBlocks))
|
||||
shardFileSize := getErasureShardFileSize(blocksize, fileSize, dataBlocks)
|
||||
func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int64) *parallelReader {
|
||||
return ¶llelReader{
|
||||
readers,
|
||||
dataBlocks,
|
||||
(offset / blocksize) * shardSize,
|
||||
shardSize,
|
||||
shardFileSize,
|
||||
e.dataBlocks,
|
||||
(offset / e.blockSize) * e.ShardSize(),
|
||||
e.ShardSize(),
|
||||
e.ShardFileSize(totalLength),
|
||||
make([][]byte, len(readers)),
|
||||
}
|
||||
}
|
||||
|
||||
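newParallelReader now derives its geometry from the Erasure value: the starting position inside each shard file is the block index (offset / blockSize) multiplied by the per-disk shard size. A short worked example with assumed numbers:

package main

import "fmt"

func main() {
	// Assumed values: 1 MiB erasure blocks, 4 data blocks, read starting at 3 MiB.
	var blockSize, dataBlocks, offset int64 = 1 << 20, 4, 3 << 20

	shardSize := (blockSize + dataBlocks - 1) / dataBlocks // what e.ShardSize() returns: 262144
	shardOffset := (offset / blockSize) * shardSize        // block #3 begins at shard byte 786432
	fmt.Println(shardSize, shardOffset)
}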
@@ -56,79 +57,83 @@ func (p *parallelReader) canDecode(buf [][]byte) bool {
|
||||
return bufCount >= p.dataBlocks
|
||||
}
|
||||
|
||||
// Read reads from bitrotReaders in parallel. Returns p.dataBlocks number of bufs.
|
||||
// Read reads from readers in parallel. Returns p.dataBlocks number of bufs.
|
||||
func (p *parallelReader) Read() ([][]byte, error) {
|
||||
type errIdx struct {
|
||||
idx int
|
||||
buf []byte
|
||||
err error
|
||||
}
|
||||
|
||||
errCh := make(chan errIdx)
|
||||
currReaderIndex := 0
|
||||
newBuf := make([][]byte, len(p.readers))
|
||||
var newBufLK sync.RWMutex
|
||||
|
||||
if p.offset+p.shardSize > p.shardFileSize {
|
||||
p.shardSize = p.shardFileSize - p.offset
|
||||
}
|
||||
|
||||
read := func(currReaderIndex int) {
|
||||
b, err := p.readers[currReaderIndex].ReadChunk(p.offset, p.shardSize)
|
||||
errCh <- errIdx{currReaderIndex, b, err}
|
||||
readTriggerCh := make(chan bool, len(p.readers))
|
||||
for i := 0; i < p.dataBlocks; i++ {
|
||||
// Setup read triggers for p.dataBlocks number of reads so that it reads in parallel.
|
||||
readTriggerCh <- true
|
||||
}
|
||||
|
||||
readerCount := 0
|
||||
for _, r := range p.readers {
|
||||
if r != nil {
|
||||
readerCount++
|
||||
}
|
||||
}
|
||||
if readerCount < p.dataBlocks {
|
||||
return nil, errXLReadQuorum
|
||||
}
|
||||
|
||||
readerCount = 0
|
||||
for i, r := range p.readers {
|
||||
if r == nil {
|
||||
continue
|
||||
}
|
||||
go read(i)
|
||||
readerCount++
|
||||
if readerCount == p.dataBlocks {
|
||||
currReaderIndex = i + 1
|
||||
readerIndex := 0
|
||||
var wg sync.WaitGroup
|
||||
// if readTrigger is true, it implies next disk.ReadAt() should be tried
|
||||
// if readTrigger is false, it implies previous disk.ReadAt() was successful and there is no need
|
||||
// to try reading the next disk.
|
||||
for readTrigger := range readTriggerCh {
|
||||
newBufLK.RLock()
|
||||
canDecode := p.canDecode(newBuf)
|
||||
newBufLK.RUnlock()
|
||||
if canDecode {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for errVal := range errCh {
|
||||
if errVal.err == nil {
|
||||
newBuf[errVal.idx] = errVal.buf
|
||||
if p.canDecode(newBuf) {
|
||||
p.offset += int64(p.shardSize)
|
||||
return newBuf, nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.readers[errVal.idx] = nil
|
||||
for currReaderIndex < len(p.readers) {
|
||||
if p.readers[currReaderIndex] != nil {
|
||||
break
|
||||
}
|
||||
currReaderIndex++
|
||||
}
|
||||
|
||||
if currReaderIndex == len(p.readers) {
|
||||
if readerIndex == len(p.readers) {
|
||||
break
|
||||
}
|
||||
go read(currReaderIndex)
|
||||
currReaderIndex++
|
||||
if !readTrigger {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
disk := p.readers[i]
|
||||
if disk == nil {
|
||||
// Since disk is nil, trigger another read.
|
||||
readTriggerCh <- true
|
||||
return
|
||||
}
|
||||
if p.buf[i] == nil {
|
||||
// Reading first time on this disk, hence the buffer needs to be allocated.
|
||||
// Subsequent reads will re-use this buffer.
|
||||
p.buf[i] = make([]byte, p.shardSize)
|
||||
}
|
||||
// For the last shard, the shardsize might be less than previous shard sizes.
|
||||
// Hence the following statement ensures that the buffer size is reset to the right size.
|
||||
p.buf[i] = p.buf[i][:p.shardSize]
|
||||
_, err := disk.ReadAt(p.buf[i], p.offset)
|
||||
if err != nil {
|
||||
p.readers[i] = nil
|
||||
// Since ReadAt returned error, trigger another read.
|
||||
readTriggerCh <- true
|
||||
return
|
||||
}
|
||||
newBufLK.Lock()
|
||||
newBuf[i] = p.buf[i]
|
||||
newBufLK.Unlock()
|
||||
// Since ReadAt returned success, there is no need to trigger another read.
|
||||
readTriggerCh <- false
|
||||
}(readerIndex)
|
||||
readerIndex++
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
if p.canDecode(newBuf) {
|
||||
p.offset += p.shardSize
|
||||
return newBuf, nil
|
||||
}
|
||||
|
||||
return nil, errXLReadQuorum
|
||||
}
|
||||
|
||||
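The rewritten Read schedules disk reads through a buffered trigger channel: dataBlocks reads start immediately, a failed (or offline) disk pushes true so the next disk is tried, and a successful read pushes false so no extra disk is touched; the loop stops as soon as enough shards are in hand to decode. A stripped-down sketch of that scheduling idea, with plain functions standing in for the bitrot readers:

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	// nil entries stand in for offline disks; strings stand in for shard data.
	readers := []func() (string, error){
		func() (string, error) { return "", errors.New("disk gone") },
		nil,
		func() (string, error) { return "shard-2", nil },
		func() (string, error) { return "shard-3", nil },
	}
	dataBlocks := 2

	results := make([]string, len(readers))
	var mu sync.Mutex
	var wg sync.WaitGroup
	trigger := make(chan bool, len(readers))
	for i := 0; i < dataBlocks; i++ {
		trigger <- true // start dataBlocks reads in parallel
	}

	next := 0
	for t := range trigger {
		mu.Lock()
		have := 0
		for _, r := range results {
			if r != "" {
				have++
			}
		}
		mu.Unlock()
		if have >= dataBlocks || next == len(readers) {
			break // enough shards to decode, or nothing left to try
		}
		if !t {
			continue // previous read succeeded, no replacement read needed
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if readers[i] == nil {
				trigger <- true // offline disk, trigger another read
				return
			}
			data, err := readers[i]()
			if err != nil {
				trigger <- true // failed read, trigger another read
				return
			}
			mu.Lock()
			results[i] = data
			mu.Unlock()
			trigger <- false // success, no extra read required
		}(next)
		next++
	}
	wg.Wait()
	// The real implementation re-checks the quorum here and returns errXLReadQuorum if still short.
	fmt.Println(results)
}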
// Decode reads from readers, reconstructs data if needed and writes the data to the writer.
|
||||
func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []*bitrotReader, offset, length, totalLength int64) error {
|
||||
func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.ReaderAt, offset, length, totalLength int64) error {
|
||||
if offset < 0 || length < 0 {
|
||||
logger.LogIf(ctx, errInvalidArgument)
|
||||
return errInvalidArgument
|
||||
@@ -141,7 +146,7 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []*bitrot
|
||||
return nil
|
||||
}
|
||||
|
||||
reader := newParallelReader(readers, e.dataBlocks, offset, totalLength, e.blockSize)
|
||||
reader := newParallelReader(readers, e, offset, totalLength)
|
||||
|
||||
startBlock := offset / e.blockSize
|
||||
endBlock := (offset + length) / e.blockSize
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
func (d badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
|
||||
func (a badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
|
||||
return 0, errFaultyDisk
|
||||
}
|
||||
|
||||
@@ -41,26 +41,28 @@ var erasureDecodeTests = []struct {
|
||||
algorithm BitrotAlgorithm
|
||||
shouldFail, shouldFailQuorum bool
|
||||
}{
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte, length: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 4
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 3, length: 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 5
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 4, length: 8 * 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 6
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 7
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 8
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 5, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 9
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 10
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 11
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 14
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 3, blocksize: int64(2 * oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 6, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 16
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV1), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 18
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 19
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte, length: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},
|
||||
// 4
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 3, length: 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 5
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 4, length: 8 * 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 6
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 7
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 8
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 5, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 9
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 10
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 11
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 14
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 3, blocksize: int64(2 * oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 6, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 16
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV1), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 18
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},
|
||||
// 19
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 20
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 21
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 9, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 22
|
||||
@@ -104,11 +106,12 @@ func TestErasureDecode(t *testing.T) {
|
||||
writeAlgorithm = DefaultBitrotAlgorithm
|
||||
}
|
||||
buffer := make([]byte, test.blocksize, 2*test.blocksize)
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", writeAlgorithm)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
|
||||
}
|
||||
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil {
|
||||
setup.Remove()
|
||||
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
|
||||
@@ -124,17 +127,19 @@ func TestErasureDecode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get the checksums of the current part.
|
||||
bitrotReaders := make([]*bitrotReader, len(disks))
|
||||
bitrotReaders := make([]io.ReaderAt, len(disks))
|
||||
for index, disk := range disks {
|
||||
if disk == OfflineDisk {
|
||||
continue
|
||||
}
|
||||
endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
|
||||
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
|
||||
tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data)
|
||||
|
||||
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer(nil)
|
||||
err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
|
||||
closeBitrotReaders(bitrotReaders)
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %d: should pass but failed with: %v", i, err)
|
||||
}
|
||||
@@ -143,31 +148,41 @@ func TestErasureDecode(t *testing.T) {
|
||||
}
|
||||
if err == nil {
|
||||
if content := writer.Bytes(); !bytes.Equal(content, data[test.offset:test.offset+test.length]) {
|
||||
t.Errorf("Test %d: read returns wrong file content", i)
t.Errorf("Test %d: read returns wrong file content.", i)
|
||||
}
|
||||
}
|
||||
|
||||
for i, r := range bitrotReaders {
|
||||
if r == nil {
|
||||
disks[i] = OfflineDisk
|
||||
}
|
||||
}
|
||||
if err == nil && !test.shouldFail {
|
||||
bitrotReaders = make([]*bitrotReader, len(disks))
|
||||
bitrotReaders = make([]io.ReaderAt, len(disks))
|
||||
for index, disk := range disks {
|
||||
if disk == OfflineDisk {
|
||||
continue
|
||||
}
|
||||
endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
|
||||
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
|
||||
tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data)
|
||||
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
|
||||
}
|
||||
for j := range disks[:test.offDisks] {
|
||||
bitrotReaders[j].disk = badDisk{nil}
|
||||
if bitrotReaders[j] == nil {
|
||||
continue
|
||||
}
|
||||
switch r := bitrotReaders[j].(type) {
|
||||
case *wholeBitrotReader:
|
||||
r.disk = badDisk{nil}
|
||||
case *streamingBitrotReader:
|
||||
r.disk = badDisk{nil}
|
||||
}
|
||||
}
|
||||
if test.offDisks > 0 {
|
||||
bitrotReaders[0] = nil
|
||||
}
|
||||
writer.Reset()
|
||||
err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
|
||||
closeBitrotReaders(bitrotReaders)
|
||||
if err != nil && !test.shouldFailQuorum {
|
||||
t.Errorf("Test %d: should pass but failed with: %v", i, err)
|
||||
}
|
||||
@@ -213,12 +228,12 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
}
|
||||
|
||||
// 10000 iterations with random offsets and lengths.
|
||||
@@ -227,6 +242,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
|
||||
// Create a test file to read from.
|
||||
buffer := make([]byte, blockSize, 2*blockSize)
|
||||
n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -247,15 +263,16 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
|
||||
expected := data[offset : offset+readLen]
|
||||
|
||||
// Get the checksums of the current part.
|
||||
bitrotReaders := make([]*bitrotReader, len(disks))
|
||||
bitrotReaders := make([]io.ReaderAt, len(disks))
|
||||
for index, disk := range disks {
|
||||
if disk == OfflineDisk {
|
||||
continue
|
||||
}
|
||||
endOffset := getErasureShardFileEndOffset(offset, readLen, length, blockSize, erasure.dataBlocks)
|
||||
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
|
||||
tillOffset := erasure.ShardFileTillOffset(offset, readLen, length)
|
||||
bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
}
|
||||
err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length)
|
||||
closeBitrotReaders(bitrotReaders)
|
||||
if err != nil {
|
||||
t.Fatal(err, offset, readLen)
|
||||
}
|
||||
@@ -281,17 +298,18 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
|
||||
b.Fatalf("failed to create ErasureStorage: %v", err)
|
||||
}
|
||||
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
}
|
||||
|
||||
content := make([]byte, size)
|
||||
buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
|
||||
_, err = erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create erasure test file: %v", err)
|
||||
}
|
||||
@@ -307,17 +325,18 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
|
||||
b.SetBytes(size)
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bitrotReaders := make([]*bitrotReader, len(disks))
|
||||
bitrotReaders := make([]io.ReaderAt, len(disks))
|
||||
for index, disk := range disks {
|
||||
if writers[index] == nil {
|
||||
continue
|
||||
}
|
||||
endOffset := getErasureShardFileEndOffset(0, size, size, erasure.blockSize, erasure.dataBlocks)
|
||||
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
|
||||
tillOffset := erasure.ShardFileTillOffset(0, size, size)
|
||||
bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
}
|
||||
if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
closeBitrotReaders(bitrotReaders)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -25,15 +25,15 @@ import (
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// Writes in parallel to bitrotWriters
|
||||
// Writes in parallel to writers
|
||||
type parallelWriter struct {
|
||||
writers []*bitrotWriter
|
||||
writers []io.Writer
|
||||
writeQuorum int
|
||||
errs []error
|
||||
}
|
||||
|
||||
// Append appends data to bitrotWriters in parallel.
|
||||
func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
|
||||
// Write writes data to writers in parallel.
|
||||
func (p *parallelWriter) Write(ctx context.Context, blocks [][]byte) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := range p.writers {
|
||||
@@ -45,7 +45,7 @@ func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
p.errs[i] = p.writers[i].Append(blocks[i])
|
||||
_, p.errs[i] = p.writers[i].Write(blocks[i])
|
||||
if p.errs[i] != nil {
|
||||
p.writers[i] = nil
|
||||
}
|
||||
@@ -70,7 +70,7 @@ func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
|
||||
}
|
||||
|
||||
// Encode reads from the reader, erasure-encodes the data and writes to the writers.
|
||||
func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []*bitrotWriter, buf []byte, quorum int) (total int64, err error) {
|
||||
func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer, buf []byte, quorum int) (total int64, err error) {
|
||||
writer := ¶llelWriter{
|
||||
writers: writers,
|
||||
writeQuorum: quorum,
|
||||
@@ -96,7 +96,7 @@ func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []*bitrotWr
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err = writer.Append(ctx, blocks); err != nil {
|
||||
if err = writer.Write(ctx, blocks); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -36,6 +36,14 @@ func (a badDisk) AppendFile(volume string, path string, buf []byte) error {
|
||||
return errFaultyDisk
|
||||
}
|
||||
|
||||
func (a badDisk) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) {
|
||||
return nil, errFaultyDisk
|
||||
}
|
||||
|
||||
func (a badDisk) CreateFile(volume, path string, size int64, reader io.Reader) error {
|
||||
return errFaultyDisk
|
||||
}
|
||||
|
||||
const oneMiByte = 1 * humanize.MiByte
|
||||
|
||||
var erasureEncodeTests = []struct {
|
||||
@@ -87,14 +95,15 @@ func TestErasureEncode(t *testing.T) {
|
||||
setup.Remove()
|
||||
t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
|
||||
}
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
if disk == OfflineDisk {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", test.algorithm)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
|
||||
}
|
||||
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %d: should pass but failed with: %v", i, err)
|
||||
}
|
||||
@@ -110,20 +119,26 @@ func TestErasureEncode(t *testing.T) {
|
||||
if length := int64(len(data[test.offset:])); n != length {
|
||||
t.Errorf("Test %d: invalid number of bytes written: got: #%d want #%d", i, n, length)
|
||||
}
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object2", test.algorithm)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
|
||||
}
|
||||
for j := range disks[:test.offDisks] {
|
||||
writers[j].disk = badDisk{nil}
|
||||
switch w := writers[j].(type) {
|
||||
case *wholeBitrotWriter:
|
||||
w.disk = badDisk{nil}
|
||||
case *streamingBitrotWriter:
|
||||
w.iow.CloseWithError(errFaultyDisk)
|
||||
}
|
||||
}
|
||||
if test.offDisks > 0 {
|
||||
writers[0] = nil
|
||||
}
|
||||
n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil && !test.shouldFailQuorum {
|
||||
t.Errorf("Test %d: should pass but failed with: %v", i, err)
|
||||
}
|
||||
@@ -167,14 +182,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
|
||||
b.SetBytes(size)
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
if disk == OfflineDisk {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
|
||||
disk.DeleteFile("testbucket", "object")
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
}
|
||||
_, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
|
||||
// Heal heals the shard files on non-nil writers. Note that the quorum passed is 1
|
||||
// as healing should continue even if it succeeds in healing only one shard file.
|
||||
func (e Erasure) Heal(ctx context.Context, readers []*bitrotReader, writers []*bitrotWriter, size int64) error {
|
||||
func (e Erasure) Heal(ctx context.Context, readers []io.ReaderAt, writers []io.Writer, size int64) error {
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
if err := e.Decode(ctx, w, readers, 0, size, size); err != nil {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -84,20 +85,21 @@ func TestErasureHeal(t *testing.T) {
|
||||
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
|
||||
}
|
||||
buffer := make([]byte, test.blocksize, 2*test.blocksize)
|
||||
writers := make([]*bitrotWriter, len(disks))
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
|
||||
}
|
||||
_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil {
|
||||
setup.Remove()
|
||||
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
|
||||
}
|
||||
|
||||
readers := make([]*bitrotReader, len(disks))
|
||||
readers := make([]io.ReaderAt, len(disks))
|
||||
for i, disk := range disks {
|
||||
shardFilesize := getErasureShardFileSize(test.blocksize, test.size, erasure.dataBlocks)
|
||||
readers[i] = newBitrotReader(disk, "testbucket", "testobject", test.algorithm, shardFilesize, writers[i].Sum())
|
||||
shardFilesize := erasure.ShardFileSize(test.size)
|
||||
readers[i] = newBitrotReader(disk, "testbucket", "testobject", shardFilesize, test.algorithm, bitrotWriterSum(writers[i]), erasure.ShardSize())
|
||||
}
|
||||
|
||||
// setup stale disks for the test case
|
||||
@@ -111,22 +113,30 @@ func TestErasureHeal(t *testing.T) {
|
||||
}
|
||||
}
|
||||
for j := 0; j < test.badDisks; j++ {
|
||||
readers[test.offDisks+j].disk = badDisk{nil}
|
||||
switch r := readers[test.offDisks+j].(type) {
|
||||
case *streamingBitrotReader:
|
||||
r.disk = badDisk{nil}
|
||||
case *wholeBitrotReader:
|
||||
r.disk = badDisk{nil}
|
||||
}
|
||||
}
|
||||
for j := 0; j < test.badStaleDisks; j++ {
|
||||
staleDisks[j] = badDisk{nil}
|
||||
}
|
||||
|
||||
staleWriters := make([]*bitrotWriter, len(staleDisks))
|
||||
staleWriters := make([]io.Writer, len(staleDisks))
|
||||
for i, disk := range staleDisks {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm)
|
||||
os.Remove(pathJoin(disk.String(), "testbucket", "testobject"))
|
||||
staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
|
||||
}
|
||||
|
||||
// test case setup is complete - now call Healfile()
|
||||
// test case setup is complete - now call Heal()
|
||||
err = erasure.Heal(context.Background(), readers, staleWriters, test.size)
|
||||
closeBitrotReaders(readers)
|
||||
closeBitrotWriters(staleWriters)
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %d: should pass but it failed with: %v", i, err)
|
||||
}
|
||||
@@ -140,7 +150,7 @@ func TestErasureHeal(t *testing.T) {
|
||||
if staleWriters[i] == nil {
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(staleWriters[i].Sum(), writers[i].Sum()) {
|
||||
if !bytes.Equal(bitrotWriterSum(staleWriters[i]), bitrotWriterSum(writers[i])) {
|
||||
t.Errorf("Test %d: heal returned different bitrot checksums", i)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/klauspost/reedsolomon"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -82,7 +81,9 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
|
||||
if write < int64(len(block)) {
|
||||
n, err := io.Copy(dst, bytes.NewReader(block[:write]))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
if err != io.ErrClosedPipe {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
totalWritten += n
|
||||
@@ -91,7 +92,8 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
|
||||
// Copy the block.
|
||||
n, err := io.Copy(dst, bytes.NewReader(block))
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "read/write on closed pipe") {
|
||||
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
|
||||
if err != io.ErrClosedPipe {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return 0, err
|
||||
@@ -107,25 +109,3 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
|
||||
// Success.
|
||||
return totalWritten, nil
|
||||
}
|
||||
|
||||
// Returns shard-file size.
|
||||
func getErasureShardFileSize(blockSize int64, totalLength int64, dataBlocks int) int64 {
|
||||
shardSize := ceilFrac(int64(blockSize), int64(dataBlocks))
|
||||
numShards := totalLength / int64(blockSize)
|
||||
lastBlockSize := totalLength % int64(blockSize)
|
||||
lastShardSize := ceilFrac(lastBlockSize, int64(dataBlocks))
|
||||
return shardSize*numShards + lastShardSize
|
||||
}
|
||||
|
||||
// Returns the endOffset till which bitrotReader should read data using disk.ReadFile()
|
||||
// partOffset, partLength and partSize are values of the object's part file.
|
||||
func getErasureShardFileEndOffset(partOffset int64, partLength int64, partSize int64, erasureBlockSize int64, dataBlocks int) int64 {
|
||||
shardSize := ceilFrac(erasureBlockSize, int64(dataBlocks))
|
||||
shardFileSize := getErasureShardFileSize(erasureBlockSize, partSize, dataBlocks)
|
||||
endShard := (partOffset + int64(partLength)) / erasureBlockSize
|
||||
endOffset := endShard*shardSize + shardSize
|
||||
if endOffset > shardFileSize {
|
||||
endOffset = shardFileSize
|
||||
}
|
||||
return endOffset
|
||||
}
|
||||
|
||||
@@ -32,18 +32,16 @@ type Erasure struct {

// NewErasure creates a new ErasureStorage.
func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
shardsize := int(ceilFrac(blockSize, int64(dataBlocks)))
erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
if err != nil {
logger.LogIf(ctx, err)
return e, err
}
e = Erasure{
encoder: erasure,
dataBlocks: dataBlocks,
parityBlocks: parityBlocks,
blockSize: blockSize,
}
e.encoder, err = reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize())))
if err != nil {
logger.LogIf(ctx, err)
return e, err
}
return
}

@@ -94,3 +92,31 @@ func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte)
}
return nil
}

// ShardSize - returns actual shard size from erasure blockSize.
func (e *Erasure) ShardSize() int64 {
return ceilFrac(e.blockSize, int64(e.dataBlocks))
}

// ShardFileSize - returns final erasure size from original size.
func (e *Erasure) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
numShards := totalLength / e.blockSize
lastBlockSize := totalLength % int64(e.blockSize)
lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks))
return numShards*e.ShardSize() + lastShardSize
}

// ShardFileTillOffset - returns the effective offset up to which erasure reading is done.
func (e *Erasure) ShardFileTillOffset(startOffset, length, totalLength int64) int64 {
shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + int64(length)) / e.blockSize
tillOffset := endShard*shardSize + shardSize
if tillOffset > shardFileSize {
tillOffset = shardFileSize
}
return tillOffset
}
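A worked example of the shard arithmetic above, using assumed sizes rather than MinIO's defaults: with a 1 MiB block split across 4 data blocks, ShardSize is 256 KiB, and a 2.5 MiB object occupies two full shards plus a 128 KiB tail in each shard file:

package main

import "fmt"

// ceilFrac mirrors the helper used by ShardSize: ceiling of num/den.
func ceilFrac(num, den int64) int64 {
	return (num + den - 1) / den
}

func main() {
	var blockSize, dataBlocks, totalLength int64 = 1 << 20, 4, 2621440 // assumed values

	shardSize := ceilFrac(blockSize, dataBlocks)                 // 262144 (256 KiB)
	numShards := totalLength / blockSize                         // 2 full blocks
	lastShardSize := ceilFrac(totalLength%blockSize, dataBlocks) // 131072 (128 KiB)
	fmt.Println(shardSize, numShards*shardSize+lastShardSize)    // 262144 655360
}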
@@ -273,7 +273,7 @@ func initFormatFS(ctx context.Context, fsPath string) (rlk *lock.RLockedFile, er
|
||||
rlk.Close()
|
||||
return nil, err
|
||||
}
|
||||
logger.SetDeploymentID(id)
|
||||
globalDeploymentID = id
|
||||
return rlk, nil
|
||||
}
|
||||
}
|
||||
@@ -333,29 +333,43 @@ func formatFSFixDeploymentID(fsFormatPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
wlk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0)
|
||||
formatStartTime := time.Now().Round(time.Second)
|
||||
getElapsedTime := func() string {
|
||||
return time.Now().Round(time.Second).Sub(formatStartTime).String()
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
defer close(doneCh)
|
||||
|
||||
var wlk *lock.LockedFile
|
||||
for range newRetryTimerSimple(doneCh) {
|
||||
wlk, err = lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0)
|
||||
if err == lock.ErrAlreadyLocked {
|
||||
// Lock already present, sleep and attempt again.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
// Lock already present, sleep and attempt again
|
||||
logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime())
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer wlk.Close()
|
||||
|
||||
err = jsonLoad(wlk, format)
|
||||
if err != nil {
|
||||
return err
|
||||
break
|
||||
}
|
||||
|
||||
// Check if it needs to be updated
|
||||
if err = jsonLoad(wlk, format); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Check if format needs to be updated
|
||||
if format.ID != "" {
|
||||
return nil
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
format.ID = mustGetUUID()
|
||||
return jsonSave(wlk, format)
|
||||
}
|
||||
|
||||
format.ID = mustGetUUID()
|
||||
if err = jsonSave(wlk, format); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if wlk != nil {
|
||||
wlk.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
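The fixed 100 ms sleep is replaced by a retry-timer loop that keeps attempting the lock and reports how long it has been waiting. newRetryTimerSimple is a MinIO-internal helper, so the sketch below only approximates the control flow with a plain time.Ticker and a stubbed lock; treat it as an illustration, not the actual implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errAlreadyLocked = errors.New("file already locked") // stands in for lock.ErrAlreadyLocked

// tryLock pretends another process holds the lock for the first two attempts.
func tryLock(attempt int) error {
	if attempt < 3 {
		return errAlreadyLocked
	}
	return nil
}

func main() {
	start := time.Now()
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	attempt := 0
	for range ticker.C {
		attempt++
		err := tryLock(attempt)
		if err == errAlreadyLocked {
			fmt.Printf("still waiting for lock (elapsed %s)\n", time.Since(start).Round(time.Millisecond))
			continue
		}
		if err != nil {
			fmt.Println("fatal:", err)
			return
		}
		break // lock acquired; proceed to read and, if needed, update format.json
	}
	fmt.Println("lock acquired after", attempt, "attempts")
}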
@@ -379,6 +379,23 @@ func saveFormatXL(disk StorageAPI, format interface{}) error {
|
||||
return disk.RenameFile(minioMetaBucket, formatConfigFileTmp, minioMetaBucket, formatConfigFile)
|
||||
}
|
||||
|
||||
var ignoredHiddenDirectories = []string{
|
||||
minioMetaBucket,
|
||||
".snapshot",
|
||||
"lost+found",
|
||||
"$RECYCLE.BIN",
|
||||
"System Volume Information",
|
||||
}
|
||||
|
||||
func isIgnoreHiddenDirectories(dir string) bool {
|
||||
for _, ignDir := range ignoredHiddenDirectories {
|
||||
if dir == ignDir {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// loadFormatXL - loads format.json from disk.
|
||||
func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) {
|
||||
buf, err := disk.ReadAll(minioMetaBucket, formatConfigFile)
|
||||
@@ -391,9 +408,7 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(vols) > 1 || (len(vols) == 1 &&
|
||||
vols[0].Name != minioMetaBucket &&
|
||||
vols[0].Name != "lost+found") {
|
||||
if len(vols) > 1 || (len(vols) == 1 && !isIgnoreHiddenDirectories(vols[0].Name)) {
|
||||
// 'format.json' not found, but we
|
||||
// found user data.
|
||||
return nil, errCorruptedFormat
|
||||
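The new isIgnoreHiddenDirectories check widens the old special case for "lost+found": when format.json is missing, a disk whose only top-level entry is one of the known system directories still counts as unformatted, while anything else is reported as a corrupted format. A condensed sketch of that decision (minioMetaBucket is written out as ".minio.sys" here):

package main

import "fmt"

var ignored = map[string]bool{
	".minio.sys": true, ".snapshot": true, "lost+found": true,
	"$RECYCLE.BIN": true, "System Volume Information": true,
}

// corruptedFormat mirrors the check in loadFormatXL: with format.json missing,
// more than one volume, or a single volume that is not a known system directory,
// means user data exists and the format is treated as corrupted.
func corruptedFormat(vols []string) bool {
	return len(vols) > 1 || (len(vols) == 1 && !ignored[vols[0]])
}

func main() {
	fmt.Println(corruptedFormat([]string{"lost+found"}))           // false: treat as unformatted
	fmt.Println(corruptedFormat([]string{"photos"}))               // true: user data present
	fmt.Println(corruptedFormat([]string{"lost+found", "photos"})) // true
}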
@@ -881,6 +896,7 @@ func newHealFormatSets(refFormat *formatXLV3, setCount, disksPerSet int, formats
|
||||
if errs[i*disksPerSet+j] == errUnformattedDisk || errs[i*disksPerSet+j] == nil {
|
||||
newFormats[i][j] = &formatXLV3{}
|
||||
newFormats[i][j].Version = refFormat.Version
|
||||
newFormats[i][j].ID = refFormat.ID
|
||||
newFormats[i][j].Format = refFormat.Format
|
||||
newFormats[i][j].XL.Version = refFormat.XL.Version
|
||||
newFormats[i][j].XL.DistributionAlgo = refFormat.XL.DistributionAlgo
|
||||
|
@@ -522,7 +522,7 @@ func TestGetXLID(t *testing.T) {
	}

	formats[2].ID = "bad-id"
	if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
	if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
		t.Fatal("Unexpected Success")
	}
}
@@ -556,4 +556,13 @@ func TestNewFormatSets(t *testing.T) {
	if newFormats == nil {
		t.Fatal("Unexpected failure")
	}

	// Check if deployment IDs are preserved.
	for i := range newFormats {
		for j := range newFormats[i] {
			if newFormats[i][j].ID != quorumFormat.ID {
				t.Fatal("Deployment id in the new format is lost")
			}
		}
	}
}

@@ -294,7 +294,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos

	// Seek to the requested offset.
	if offset > 0 {
		_, err = fr.Seek(offset, os.SEEK_SET)
		_, err = fr.Seek(offset, io.SeekStart)
		if err != nil {
			logger.LogIf(ctx, err)
			return nil, 0, err

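The only change in this hunk is the whence constant: io.SeekStart (available since Go 1.7) replaces the older os.SEEK_SET, with identical meaning. A tiny example of seeking relative to the start of a reader:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	r := strings.NewReader("hello world")

	// io.SeekStart means "offset is relative to the beginning of the
	// stream", exactly what os.SEEK_SET expressed before it was deprecated.
	if _, err := r.Seek(6, io.SeekStart); err != nil {
		panic(err)
	}

	rest, _ := ioutil.ReadAll(r)
	fmt.Println(string(rest)) // prints "world"
}
```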
@@ -22,9 +22,10 @@ import (
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	pathutil "path"
	"strings"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/lock"
@@ -114,7 +115,7 @@ type fsMetaV1 struct {
	// Metadata map for current object.
	Meta map[string]string `json:"meta,omitempty"`
	// parts info for current object - used in encryption.
	Parts []objectPartInfo `json:"parts,omitempty"`
	Parts []ObjectPartInfo `json:"parts,omitempty"`
}

// IsValid - tells if the format is sane by validating the version
@@ -137,11 +138,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo

	// Guess content-type from the extension if possible.
	if m.Meta["content-type"] == "" {
		if objectExt := pathutil.Ext(object); objectExt != "" {
			if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok {
				m.Meta["content-type"] = content.ContentType
			}
		}
		m.Meta["content-type"] = mimedb.TypeByExtension(pathutil.Ext(object))
	}

	if hasSuffix(object, slashSeparator) {
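Here the hand-rolled lookup into mimedb.DB (strip the dot, lower-case the extension, index the map) collapses into a single mimedb.TypeByExtension call. The sketch below illustrates the same shape of API with the standard library's mime package; minio's mimedb ships its own bundled extension table, so treat this purely as an analogy.

```go
package main

import (
	"fmt"
	"mime"
	pathutil "path"
)

func main() {
	object := "photos/2019/beach.png"

	// One call replaces the manual map lookup; an empty string means the
	// extension is unknown, so fall back to a generic type (a common
	// convention, not necessarily what the FS backend does).
	contentType := mime.TypeByExtension(pathutil.Ext(object))
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	fmt.Println(contentType) // image/png
}
```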
@@ -174,7 +171,15 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
	} else {
		objInfo.StorageClass = globalMinioDefaultStorageClass
	}

	var (
		t time.Time
		e error
	)
	if exp, ok := m.Meta["expires"]; ok {
		if t, e = time.Parse(http.TimeFormat, exp); e == nil {
			objInfo.Expires = t.UTC()
		}
	}
	// etag/md5Sum has already been extracted. We need to
	// remove to avoid it from appearing as part of
	// response headers. e.g, X-Minio-* or X-Amz-*.
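The added block reads an optional "expires" metadata entry, parses it with http.TimeFormat (the RFC 7231 date layout used in HTTP headers), and stores the UTC instant on the object info. A standalone example of that parse:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Value as it might be stored under the "expires" metadata key.
	exp := "Wed, 21 Oct 2015 07:28:00 GMT"

	// http.TimeFormat is the layout HTTP headers use; a failed parse simply
	// leaves the Expires field unset, as in the hunk above.
	if t, err := time.Parse(http.TimeFormat, exp); err == nil {
		fmt.Println(t.UTC()) // 2015-10-21 07:28:00 +0000 UTC
	}
}
```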
@@ -212,9 +217,9 @@ func parseFSMetaMap(fsMetaBuf []byte) map[string]string {
	return metaMap
}

func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
func parseFSPartsArray(fsMetaBuf []byte) []ObjectPartInfo {
	// Get xlMetaV1.Parts array
	var partsArray []objectPartInfo
	var partsArray []ObjectPartInfo

	partsArrayResult := gjson.GetBytes(fsMetaBuf, "parts")
	partsArrayResult.ForEach(func(key, part gjson.Result) bool {
@@ -223,11 +228,13 @@ func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
		name := gjson.Get(partJSON, "name").String()
		etag := gjson.Get(partJSON, "etag").String()
		size := gjson.Get(partJSON, "size").Int()
		partsArray = append(partsArray, objectPartInfo{
			Number: int(number),
			Name: name,
			ETag: etag,
			Size: size,
		actualSize := gjson.Get(partJSON, "actualSize").Int()
		partsArray = append(partsArray, ObjectPartInfo{
			Number: int(number),
			Name: name,
			ETag: etag,
			Size: size,
			ActualSize: int64(actualSize),
		})
		return true
	})

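parseFSPartsArray walks the "parts" array with gjson rather than unmarshalling the whole fs.json document, and now also extracts the new actualSize field into ObjectPartInfo. The sketch below shows the same traversal; PartInfo and the sample JSON are illustrative, not the actual fs.json schema or struct.

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

// PartInfo is a trimmed-down, hypothetical stand-in for ObjectPartInfo.
type PartInfo struct {
	Number     int
	Name       string
	ETag       string
	Size       int64
	ActualSize int64
}

func main() {
	fsMetaBuf := []byte(`{"parts":[{"number":1,"name":"part.1","etag":"abc","size":10,"actualSize":12}]}`)

	var parts []PartInfo
	// Iterate the "parts" array without decoding the rest of the document.
	gjson.GetBytes(fsMetaBuf, "parts").ForEach(func(_, part gjson.Result) bool {
		parts = append(parts, PartInfo{
			Number:     int(part.Get("number").Int()),
			Name:       part.Get("name").String(),
			ETag:       part.Get("etag").String(),
			Size:       part.Get("size").Int(),
			ActualSize: part.Get("actualSize").Int(),
		})
		return true // keep iterating
	})
	fmt.Printf("%+v\n", parts)
}
```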
@@ -38,6 +38,9 @@ func TestFSV1MetadataObjInfo(t *testing.T) {
	if objInfo.IsDir {
		t.Fatal("Unexpected object info value for IsDir", objInfo.IsDir)
	}
	if !objInfo.Expires.IsZero() {
		t.Fatal("Unexpected object info value for Expires ", objInfo.Expires)
	}
}

// TestReadFSMetadata - readFSMetadata testing with a healthy and faulty disk
@@ -54,7 +57,7 @@ func TestReadFSMetadata(t *testing.T) {
	if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
		t.Fatal("Unexpected err: ", err)
	}
	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
		t.Fatal("Unexpected err: ", err)
	}

@@ -89,7 +92,7 @@ func TestWriteFSMetadata(t *testing.T) {
	if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
		t.Fatal("Unexpected err: ", err)
	}
	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
		t.Fatal("Unexpected err: ", err)
	}
Some files were not shown because too many files have changed in this diff.